Dataset columns:
text: string (lengths 5 to 631k)
id: string (lengths 14 to 178)
metadata: dict
__index_level_0__: int64 (0 to 647)
import type { ToolIOType, ToolOutputComponents } from "$lib/types/Tool"; export const ToolOutputPaths: Record< ToolOutputComponents, { type: ToolIOType; path: string; } > = { textbox: { type: "str", path: "$", }, markdown: { type: "str", path: "$", }, number: { type: "float", path: "$", }, image: { type: "file", path: "$.url", }, gallery: { type: "file", path: "$[*].image.url", }, audio: { type: "file", path: "$.url", }, video: { type: "file", path: "$.video.url", }, file: { type: "file", path: "$.url", }, json: { type: "str", path: "$", }, }; export const isValidOutputComponent = ( outputComponent: string ): outputComponent is keyof typeof ToolOutputPaths => { return Object.keys(ToolOutputPaths).includes(outputComponent); };
chat-ui/src/lib/server/tools/outputs.ts/0
{ "file_path": "chat-ui/src/lib/server/tools/outputs.ts", "repo_id": "chat-ui", "token_count": 344 }
87
import { chromium, devices, type Page, type BrowserContextOptions, type Response, type Browser, } from "playwright"; import { PlaywrightBlocker } from "@cliqz/adblocker-playwright"; import { config } from "$lib/server/config"; import { logger } from "$lib/server/logger"; import { onExit } from "$lib/server/exitHandler"; const blocker = config.PLAYWRIGHT_ADBLOCKER === "true" ? await PlaywrightBlocker.fromPrebuiltAdsAndTracking(fetch) .then((blker) => { const mostBlocked = blker.blockFonts().blockMedias().blockFrames().blockImages(); if (config.WEBSEARCH_JAVASCRIPT === "false") return mostBlocked.blockScripts(); return mostBlocked; }) .catch((err) => { logger.error(err, "Failed to initialize PlaywrightBlocker from prebuilt lists"); return PlaywrightBlocker.empty(); }) : PlaywrightBlocker.empty(); let browserSingleton: Promise<Browser> | undefined; async function getBrowser() { const browser = await chromium.launch({ headless: true }); onExit(() => browser.close()); browser.on("disconnected", () => { logger.warn("Browser closed"); browserSingleton = undefined; }); return browser; } async function getPlaywrightCtx() { if (!browserSingleton) browserSingleton = getBrowser(); const browser = await browserSingleton; const device = devices["Desktop Chrome"]; const options: BrowserContextOptions = { ...device, // Increasing width improves spatial clustering accuracy screen: { width: 3840, height: 1080, }, viewport: { width: 3840, height: 1080, }, reducedMotion: "reduce", acceptDownloads: false, timezoneId: "America/New_York", locale: "en-US", }; return browser.newContext(options); } export async function withPage<T>( url: string, callback: (page: Page, response?: Response) => Promise<T> ): Promise<T> { const ctx = await getPlaywrightCtx(); try { const page = await ctx.newPage(); if (config.PLAYWRIGHT_ADBLOCKER === "true") { await blocker.enableBlockingInPage(page); } await page.route("**", (route, request) => { const requestUrl = request.url(); if (!requestUrl.startsWith("https://")) { logger.warn(`Blocked request to: ${requestUrl}`); return route.abort(); } return route.continue(); }); const res = await page .goto(url, { waitUntil: "load", timeout: parseInt(config.WEBSEARCH_TIMEOUT) }) .catch(() => { console.warn( `Failed to load page within ${parseInt(config.WEBSEARCH_TIMEOUT) / 1000}s: ${url}` ); }); return await callback(page, res ?? undefined); } finally { await ctx.close(); } }
chat-ui/src/lib/server/websearch/scrape/playwright.ts/0
{ "file_path": "chat-ui/src/lib/server/websearch/scrape/playwright.ts", "repo_id": "chat-ui", "token_count": 938 }
88
import { writable } from "svelte/store"; export const ERROR_MESSAGES = { default: "Oops, something went wrong.", authOnly: "You have to be logged in.", rateLimited: "You are sending too many messages. Try again later.", }; export const error = writable<string | undefined>(undefined);
chat-ui/src/lib/stores/errors.ts/0
{ "file_path": "chat-ui/src/lib/stores/errors.ts", "repo_id": "chat-ui", "token_count": 86 }
89
import type { Session } from "./Session"; import type { Timestamps } from "./Timestamps"; import type { User } from "./User"; export interface MessageEvent extends Pick<Timestamps, "createdAt"> { userId: User["_id"] | Session["sessionId"]; ip?: string; expiresAt: Date; type: "message" | "export"; }
chat-ui/src/lib/types/MessageEvent.ts/0
{ "file_path": "chat-ui/src/lib/types/MessageEvent.ts", "repo_id": "chat-ui", "token_count": 99 }
90
import type { ObjectId } from "mongodb"; import type { Conversation } from "./Conversation"; import type { Timestamps } from "./Timestamps"; import type { HeaderElement } from "$lib/server/websearch/markdown/types"; export interface WebSearch extends Timestamps { _id?: ObjectId; convId?: Conversation["_id"]; prompt: string; searchQuery: string; results: WebSearchSource[]; contextSources: WebSearchUsedSource[]; } export interface WebSearchSource { title?: string; link: string; } export interface WebSearchScrapedSource extends WebSearchSource { page: WebSearchPage; } export interface WebSearchPage { title: string; siteName?: string; author?: string; description?: string; createdAt?: string; modifiedAt?: string; markdownTree: HeaderElement; } export interface WebSearchUsedSource extends WebSearchScrapedSource { context: string; } export type WebSearchMessageSources = { type: "sources"; sources: WebSearchSource[]; }; // eslint-disable-next-line no-shadow export enum WebSearchProvider { GOOGLE = "Google", YOU = "You.com", SEARXNG = "SearXNG", BING = "Bing", }
chat-ui/src/lib/types/WebSearch.ts/0
{ "file_path": "chat-ui/src/lib/types/WebSearch.ts", "repo_id": "chat-ui", "token_count": 348 }
91
import katex from "katex"; import "katex/dist/contrib/mhchem.mjs"; import { Marked } from "marked"; import type { Tokens, TokenizerExtension, RendererExtension } from "marked"; import type { WebSearchSource } from "$lib/types/WebSearch"; import hljs from "highlight.js"; interface katexBlockToken extends Tokens.Generic { type: "katexBlock"; raw: string; text: string; displayMode: true; } interface katexInlineToken extends Tokens.Generic { type: "katexInline"; raw: string; text: string; displayMode: false; } export const katexBlockExtension: TokenizerExtension & RendererExtension = { name: "katexBlock", level: "block", start(src: string): number | undefined { const match = src.match(/(\${2}|\\\[)/); return match ? match.index : -1; }, tokenizer(src: string): katexBlockToken | undefined { // 1) $$ ... $$ const rule1 = /^\${2}([\s\S]+?)\${2}/; const match1 = rule1.exec(src); if (match1) { const token: katexBlockToken = { type: "katexBlock", raw: match1[0], text: match1[1].trim(), displayMode: true, }; return token; } // 2) \[ ... \] const rule2 = /^\\\[([\s\S]+?)\\\]/; const match2 = rule2.exec(src); if (match2) { const token: katexBlockToken = { type: "katexBlock", raw: match2[0], text: match2[1].trim(), displayMode: true, }; return token; } return undefined; }, renderer(token) { if (token.type === "katexBlock") { return katex.renderToString(token.text, { throwOnError: false, displayMode: token.displayMode, }); } return undefined; }, }; const katexInlineExtension: TokenizerExtension & RendererExtension = { name: "katexInline", level: "inline", start(src: string): number | undefined { const match = src.match(/(\$|\\\()/); return match ? match.index : -1; }, tokenizer(src: string): katexInlineToken | undefined { // 1) $...$ const rule1 = /^\$([^$]+?)\$/; const match1 = rule1.exec(src); if (match1) { const token: katexInlineToken = { type: "katexInline", raw: match1[0], text: match1[1].trim(), displayMode: false, }; return token; } // 2) \(...\) const rule2 = /^\\\(([\s\S]+?)\\\)/; const match2 = rule2.exec(src); if (match2) { const token: katexInlineToken = { type: "katexInline", raw: match2[0], text: match2[1].trim(), displayMode: false, }; return token; } return undefined; }, renderer(token) { if (token.type === "katexInline") { return katex.renderToString(token.text, { throwOnError: false, displayMode: token.displayMode, }); } return undefined; }, }; function escapeHTML(content: string) { return content.replace( /[<>&"']/g, (x) => ({ "<": "&lt;", ">": "&gt;", "&": "&amp;", "'": "&#39;", '"': "&quot;", })[x] || x ); } function addInlineCitations(md: string, webSearchSources: WebSearchSource[] = []): string { const linkStyle = "color: rgb(59, 130, 246); text-decoration: none; hover:text-decoration: underline;"; return md.replace(/\[(\d+)\]/g, (match: string) => { const indices: number[] = (match.match(/\d+/g) || []).map(Number); const links: string = indices .map((index: number) => { if (index === 0) return false; const source = webSearchSources[index - 1]; if (source) { return `<a href="${source.link}" target="_blank" rel="noreferrer" style="${linkStyle}">${index}</a>`; } return ""; }) .filter(Boolean) .join(", "); return links ? ` <sup>${links}</sup>` : match; }); } function createMarkedInstance(sources: WebSearchSource[]): Marked { return new Marked({ hooks: { postprocess: (html) => addInlineCitations(html, sources), }, extensions: [katexBlockExtension, katexInlineExtension], renderer: { link: (href, title, text) => `<a href="${href?.replace(/>$/, "")}" target="_blank" rel="noreferrer">${text}</a>`, html: (html) => escapeHTML(html), }, gfm: true, breaks: true, }); } type CodeToken = { type: "code"; lang: string; code: string; rawCode: string; }; type TextToken = { type: "text"; html: string | Promise<string>; }; export async function processTokens(content: string, sources: WebSearchSource[]): Promise<Token[]> { const marked = createMarkedInstance(sources); const tokens = marked.lexer(content); const processedTokens = await Promise.all( tokens.map(async (token) => { if (token.type === "code") { return { type: "code" as const, lang: token.lang, code: hljs.highlightAuto(token.text, hljs.getLanguage(token.lang)?.aliases).value, rawCode: token.text, }; } else { return { type: "text" as const, html: marked.parse(token.raw), }; } }) ); return processedTokens; } export function processTokensSync(content: string, sources: WebSearchSource[]): Token[] { const marked = createMarkedInstance(sources); const tokens = marked.lexer(content); return tokens.map((token) => { if (token.type === "code") { return { type: "code" as const, lang: token.lang, code: hljs.highlightAuto(token.text, hljs.getLanguage(token.lang)?.aliases).value, rawCode: token.text, }; } return { type: "text" as const, html: marked.parse(token.raw) }; }); } export type Token = CodeToken | TextToken;
chat-ui/src/lib/utils/marked.ts/0
{ "file_path": "chat-ui/src/lib/utils/marked.ts", "repo_id": "chat-ui", "token_count": 2227 }
92
import type { Tool } from "$lib/types/Tool"; /** * Checks if a tool's name equals a value. Replaces all hyphens with underscores before comparison * since some models return underscores even when hyphens are used in the request. **/ export function toolHasName(name: string, tool: Pick<Tool, "name">): boolean { return tool.name.replaceAll("-", "_") === name.replaceAll("-", "_"); } export const colors = ["purple", "blue", "green", "yellow", "red"] as const; export const icons = [ "wikis", "tools", "camera", "code", "email", "cloud", "terminal", "game", "chat", "speaker", "video", ] as const;
chat-ui/src/lib/utils/tools.ts/0
{ "file_path": "chat-ui/src/lib/utils/tools.ts", "repo_id": "chat-ui", "token_count": 202 }
93
<script lang="ts"> import "../styles/main.css"; import { onDestroy, onMount, untrack } from "svelte"; import { goto } from "$app/navigation"; import { base } from "$app/paths"; import { page } from "$app/stores"; import { error } from "$lib/stores/errors"; import { createSettingsStore } from "$lib/stores/settings"; import { shareConversation } from "$lib/shareConversation"; import Toast from "$lib/components/Toast.svelte"; import NavMenu from "$lib/components/NavMenu.svelte"; import MobileNav from "$lib/components/MobileNav.svelte"; import titleUpdate from "$lib/stores/titleUpdate"; import DisclaimerModal from "$lib/components/DisclaimerModal.svelte"; import ExpandNavigation from "$lib/components/ExpandNavigation.svelte"; import { loginModalOpen } from "$lib/stores/loginModal"; import LoginModal from "$lib/components/LoginModal.svelte"; import OverloadedModal from "$lib/components/OverloadedModal.svelte"; import Search from "$lib/components/chat/Search.svelte"; import { setContext } from "svelte"; import { handleResponse, useAPIClient } from "$lib/APIClient"; let { data = $bindable(), children } = $props(); setContext("publicConfig", data.publicConfig); const publicConfig = data.publicConfig; const client = useAPIClient(); let conversations = $state(data.conversations); $effect(() => { data.conversations && untrack(() => (conversations = data.conversations)); }); let isNavCollapsed = $state(false); let overloadedModalOpen = $state(false); let errorToastTimeout: ReturnType<typeof setTimeout>; let currentError: string | undefined = $state(); async function onError() { // If a new different error comes, wait for the current error to hide first if ($error && currentError && $error !== currentError) { clearTimeout(errorToastTimeout); currentError = undefined; await new Promise((resolve) => setTimeout(resolve, 300)); } currentError = $error; if (currentError === "Model is overloaded") { overloadedModalOpen = true; } errorToastTimeout = setTimeout(() => { $error = undefined; currentError = undefined; }, 10000); } async function deleteConversation(id: string) { client .conversations({ id }) .delete() .then(handleResponse) .then(async () => { conversations = conversations.filter((conv) => conv.id !== id); if ($page.params.id === id) { await goto(`${base}/`, { invalidateAll: true }); } }) .catch((err) => { console.error(err); $error = String(err); }); } async function editConversationTitle(id: string, title: string) { client .conversations({ id }) .patch({ title }) .then(handleResponse) .then(async () => { conversations = conversations.map((conv) => (conv.id === id ? { ...conv, title } : conv)); }) .catch((err) => { console.error(err); $error = String(err); }); } onDestroy(() => { clearTimeout(errorToastTimeout); }); $effect(() => { if ($error) onError(); }); $effect(() => { if ($titleUpdate) { const convIdx = conversations.findIndex(({ id }) => id === $titleUpdate?.convId); if (convIdx != -1) { conversations[convIdx].title = $titleUpdate?.title ?? conversations[convIdx].title; } $titleUpdate = null; } }); const settings = createSettingsStore(data.settings); onMount(async () => { if ($page.url.searchParams.has("model")) { await settings .instantSet({ activeModel: $page.url.searchParams.get("model") ?? $settings.activeModel, }) .then(async () => { const query = new URLSearchParams($page.url.searchParams.toString()); query.delete("model"); await goto(`${base}/?${query.toString()}`, { invalidateAll: true, }); }); } if ($page.url.searchParams.has("tools")) { const tools = $page.url.searchParams.get("tools")?.split(","); await settings .instantSet({ tools: [...($settings.tools ?? []), ...(tools ?? [])], }) .then(async () => { const query = new URLSearchParams($page.url.searchParams.toString()); query.delete("tools"); await goto(`${base}/?${query.toString()}`, { invalidateAll: true, }); }); } if ($page.url.searchParams.has("token")) { const token = $page.url.searchParams.get("token"); await fetch(`${base}/api/user/validate-token`, { method: "POST", body: JSON.stringify({ token }), }).then(() => { goto(`${base}/`, { invalidateAll: true }); }); } }); let mobileNavTitle = $derived( ["/models", "/assistants", "/privacy", "/tools"].includes($page.route.id ?? "") ? "" : conversations.find((conv) => conv.id === $page.params.id)?.title ); let showDisclaimer = $derived( !$settings.ethicsModalAccepted && $page.url.pathname !== `${base}/privacy` && publicConfig.PUBLIC_APP_DISCLAIMER === "1" && !($page.data.shared === true) ); </script> <svelte:head> <title>{publicConfig.PUBLIC_APP_NAME}</title> <meta name="description" content="The first open source alternative to ChatGPT. 💪" /> <meta name="twitter:card" content="summary_large_image" /> <meta name="twitter:site" content="@huggingface" /> <!-- use those meta tags everywhere except on the share assistant page --> <!-- feel free to refacto if there's a better way --> {#if !$page.url.pathname.includes("/assistant/") && $page.route.id !== "/assistants" && !$page.url.pathname.includes("/models/") && !$page.url.pathname.includes("/tools")} <meta property="og:title" content={publicConfig.PUBLIC_APP_NAME} /> <meta property="og:type" content="website" /> <meta property="og:url" content="{publicConfig.PUBLIC_ORIGIN || $page.url.origin}{base}" /> <meta property="og:image" content="{publicConfig.assetPath}/thumbnail.png" /> <meta property="og:description" content={publicConfig.PUBLIC_APP_DESCRIPTION} /> {/if} <link rel="icon" href="{publicConfig.assetPath}/favicon.ico" sizes="32x32" /> <link rel="icon" href="{publicConfig.assetPath}/icon.svg" type="image/svg+xml" /> <link rel="apple-touch-icon" href="{publicConfig.assetPath}/apple-touch-icon.png" /> <link rel="manifest" href="{publicConfig.assetPath}/manifest.json" /> {#if publicConfig.PUBLIC_PLAUSIBLE_SCRIPT_URL && publicConfig.PUBLIC_ORIGIN} <script defer data-domain={new URL(publicConfig.PUBLIC_ORIGIN).hostname} src={publicConfig.PUBLIC_PLAUSIBLE_SCRIPT_URL} ></script> {/if} {#if publicConfig.PUBLIC_APPLE_APP_ID} <meta name="apple-itunes-app" content={`app-id=${publicConfig.PUBLIC_APPLE_APP_ID}`} /> {/if} </svelte:head> {#if showDisclaimer} <DisclaimerModal on:close={() => ($settings.ethicsModalAccepted = true)} /> {/if} {#if $loginModalOpen} <LoginModal on:close={() => { $loginModalOpen = false; }} /> {/if} {#if overloadedModalOpen && publicConfig.isHuggingChat} <OverloadedModal onClose={() => (overloadedModalOpen = false)} /> {/if} <Search /> <div class="fixed grid h-full w-screen grid-cols-1 grid-rows-[auto,1fr] overflow-hidden text-smd {!isNavCollapsed ? 'md:grid-cols-[290px,1fr]' : 'md:grid-cols-[0px,1fr]'} transition-[300ms] [transition-property:grid-template-columns] dark:text-gray-300 md:grid-rows-[1fr]" > <ExpandNavigation isCollapsed={isNavCollapsed} onClick={() => (isNavCollapsed = !isNavCollapsed)} classNames="absolute inset-y-0 z-10 my-auto {!isNavCollapsed ? 'left-[290px]' : 'left-0'} *:transition-transform" /> <MobileNav title={mobileNavTitle}> <NavMenu {conversations} user={data.user} canLogin={!data.user && data.loginEnabled} on:shareConversation={(ev) => shareConversation(ev.detail.id, ev.detail.title)} on:deleteConversation={(ev) => deleteConversation(ev.detail)} on:editConversationTitle={(ev) => editConversationTitle(ev.detail.id, ev.detail.title)} /> </MobileNav> <nav class="grid max-h-screen grid-cols-1 grid-rows-[auto,1fr,auto] overflow-hidden *:w-[290px] max-md:hidden" > <NavMenu {conversations} user={data.user} canLogin={!data.user && data.loginEnabled} on:shareConversation={(ev) => shareConversation(ev.detail.id, ev.detail.title)} on:deleteConversation={(ev) => deleteConversation(ev.detail)} on:editConversationTitle={(ev) => editConversationTitle(ev.detail.id, ev.detail.title)} /> </nav> {#if currentError} <Toast message={currentError} /> {/if} {@render children?.()} </div>
chat-ui/src/routes/+layout.svelte/0
{ "file_path": "chat-ui/src/routes/+layout.svelte", "repo_id": "chat-ui", "token_count": 3200 }
94
import { config } from "$lib/server/config"; import { authCondition, requiresUser } from "$lib/server/auth.js"; import { collections } from "$lib/server/database.js"; import { editableToolSchema } from "$lib/server/tools/index.js"; import { generateSearchTokens } from "$lib/utils/searchTokens.js"; import { ObjectId } from "mongodb"; import { ReviewStatus } from "$lib/types/Review.js"; import { error } from "@sveltejs/kit"; import { usageLimits } from "$lib/server/usageLimits.js"; export async function POST({ request, locals }) { if (config.COMMUNITY_TOOLS !== "true") { error(403, "Community tools are not enabled"); } const body = await request.json(); const parse = editableToolSchema.safeParse(body); if (!parse.success) { // Loop through the errors array and create a custom errors array const errors = parse.error.errors.map((error) => { return { field: error.path[0], message: error.message, }; }); return new Response(JSON.stringify({ error: true, errors }), { status: 400 }); } // can only create tools when logged in, IF login is setup if (!locals.user && requiresUser) { const errors = [{ field: "description", message: "Must be logged in. Unauthorized" }]; return new Response(JSON.stringify({ error: true, errors }), { status: 400 }); } const toolCounts = await collections.tools.countDocuments({ createdById: locals.user?._id }); if (usageLimits?.tools && toolCounts > usageLimits.tools) { const errors = [ { field: "description", message: "You have reached the maximum number of tools. Delete some to continue.", }, ]; return new Response(JSON.stringify({ error: true, errors }), { status: 400 }); } if (!locals.user || !authCondition(locals)) { error(401, "Unauthorized"); } const { insertedId } = await collections.tools.insertOne({ ...parse.data, type: "community" as const, _id: new ObjectId(), createdById: locals.user?._id, createdByName: locals.user?.username, createdAt: new Date(), updatedAt: new Date(), last24HoursUseCount: 0, useCount: 0, review: ReviewStatus.PRIVATE, searchTokens: generateSearchTokens(parse.data.displayName), }); return new Response(JSON.stringify({ toolId: insertedId.toString() }), { status: 200 }); }
chat-ui/src/routes/api/tools/+server.ts/0
{ "file_path": "chat-ui/src/routes/api/tools/+server.ts", "repo_id": "chat-ui", "token_count": 742 }
95
<script lang="ts"> import ChatWindow from "$lib/components/chat/ChatWindow.svelte"; import { pendingMessage } from "$lib/stores/pendingMessage"; import { isAborted } from "$lib/stores/isAborted"; import { onMount } from "svelte"; import { page } from "$app/state"; import { beforeNavigate, goto, invalidateAll } from "$app/navigation"; import { base } from "$app/paths"; import { shareConversation } from "$lib/shareConversation"; import { ERROR_MESSAGES, error } from "$lib/stores/errors"; import { findCurrentModel } from "$lib/utils/models"; import { webSearchParameters } from "$lib/stores/webSearchParameters"; import type { Message } from "$lib/types/Message"; import { MessageReasoningUpdateType, MessageUpdateStatus, MessageUpdateType, } from "$lib/types/MessageUpdate"; import titleUpdate from "$lib/stores/titleUpdate"; import file2base64 from "$lib/utils/file2base64"; import { addChildren } from "$lib/utils/tree/addChildren"; import { addSibling } from "$lib/utils/tree/addSibling"; import { fetchMessageUpdates } from "$lib/utils/messageUpdates"; import type { v4 } from "uuid"; import { useSettingsStore } from "$lib/stores/settings.js"; import { browser } from "$app/environment"; import type { TreeNode, TreeId } from "$lib/utils/tree/tree"; import "katex/dist/katex.min.css"; import { updateDebouncer } from "$lib/utils/updates.js"; import { documentParserToolId } from "$lib/utils/toolIds.js"; let { data = $bindable() } = $props(); let loading = $state(false); let pending = $state(false); let initialRun = true; let files: File[] = $state([]); let conversations = $state(data.conversations); $effect(() => { conversations = data.conversations; }); function createMessagesPath<T>(messages: TreeNode<T>[], msgId?: TreeId): TreeNode<T>[] { if (initialRun) { if (!msgId && page.url.searchParams.get("leafId")) { msgId = page.url.searchParams.get("leafId") as string; page.url.searchParams.delete("leafId"); } if (!msgId && browser && localStorage.getItem("leafId")) { msgId = localStorage.getItem("leafId") as string; } initialRun = false; } const msg = messages.find((msg) => msg.id === msgId) ?? messages.at(-1); if (!msg) return []; // ancestor path const { ancestors } = msg; const path = []; if (ancestors?.length) { for (const ancestorId of ancestors) { const ancestor = messages.find((msg) => msg.id === ancestorId); if (ancestor) { path.push(ancestor); } } } // push the node itself in the middle path.push(msg); // children path let childrenIds = msg.children; while (childrenIds?.length) { let lastChildId = childrenIds.at(-1); const lastChild = messages.find((msg) => msg.id === lastChildId); if (lastChild) { path.push(lastChild); } childrenIds = lastChild?.children; } return path; } function createMessagesAlternatives<T>(messages: TreeNode<T>[]): TreeId[][] { const alternatives = []; for (const message of messages) { if (message.children?.length) { alternatives.push(message.children); } } return alternatives; } async function convFromShared() { try { loading = true; const res = await fetch(`${base}/conversation`, { method: "POST", headers: { "Content-Type": "application/json", }, body: JSON.stringify({ fromShare: page.params.id, model: data.model, }), }); if (!res.ok) { error.set(await res.text()); console.error("Error while creating conversation: " + (await res.text())); return; } const { conversationId } = await res.json(); return conversationId; } catch (err) { error.set(ERROR_MESSAGES.default); console.error(String(err)); throw err; } } // this function is used to send new message to the backends async function writeMessage({ prompt, messageId = messagesPath.at(-1)?.id ?? undefined, isRetry = false, isContinue = false, }: { prompt?: string; messageId?: ReturnType<typeof v4>; isRetry?: boolean; isContinue?: boolean; }): Promise<void> { try { $isAborted = false; loading = true; pending = true; const base64Files = await Promise.all( (files ?? []).map((file) => file2base64(file).then((value) => ({ type: "base64" as const, value, mime: file.type, name: file.name, })) ) ); let messageToWriteToId: Message["id"] | undefined = undefined; // used for building the prompt, subtree of the conversation that goes from the latest message to the root if (isContinue && messageId) { if ((messages.find((msg) => msg.id === messageId)?.children?.length ?? 0) > 0) { $error = "Can only continue the last message"; } else { messageToWriteToId = messageId; } } else if (isRetry && messageId) { // two cases, if we're retrying a user message with a newPrompt set, // it means we're editing a user message // if we're retrying on an assistant message, newPrompt cannot be set // it means we're retrying the last assistant message for a new answer const messageToRetry = messages.find((message) => message.id === messageId); if (!messageToRetry) { $error = "Message not found"; } if (messageToRetry?.from === "user" && prompt) { // add a sibling to this message from the user, with the alternative prompt // add a children to that sibling, where we can write to const newUserMessageId = addSibling( { messages, rootMessageId: data.rootMessageId, }, { from: "user", content: prompt, files: messageToRetry.files, }, messageId ); messageToWriteToId = addChildren( { messages, rootMessageId: data.rootMessageId, }, { from: "assistant", content: "" }, newUserMessageId ); } else if (messageToRetry?.from === "assistant") { // we're retrying an assistant message, to generate a new answer // just add a sibling to the assistant answer where we can write to messageToWriteToId = addSibling( { messages, rootMessageId: data.rootMessageId, }, { from: "assistant", content: "" }, messageId ); } } else { // just a normal linear conversation, so we add the user message // and the blank assistant message back to back const newUserMessageId = addChildren( { messages, rootMessageId: data.rootMessageId, }, { from: "user", content: prompt ?? "", files: base64Files, }, messageId ); if (!data.rootMessageId) { data.rootMessageId = newUserMessageId; } messageToWriteToId = addChildren( { messages, rootMessageId: data.rootMessageId, }, { from: "assistant", content: "", }, newUserMessageId ); } const userMessage = messages.find((message) => message.id === messageId); const messageToWriteTo = messages.find((message) => message.id === messageToWriteToId); if (!messageToWriteTo) { throw new Error("Message to write to not found"); } // disable websearch if assistant is present const hasAssistant = !!page.data.assistant; const messageUpdatesAbortController = new AbortController(); let tools = $settings.tools; if (!files.some((file) => file.type.startsWith("application/"))) { tools = $settings.tools?.filter((tool) => tool !== documentParserToolId); } const messageUpdatesIterator = await fetchMessageUpdates( page.params.id, { base, inputs: prompt, messageId, isRetry, isContinue, webSearch: !hasAssistant && !activeModel.tools && $webSearchParameters.useSearch, tools, files: isRetry ? userMessage?.files : base64Files, }, messageUpdatesAbortController.signal ).catch((err) => { error.set(err.message); }); if (messageUpdatesIterator === undefined) return; files = []; let buffer = ""; // Initialize lastUpdateTime outside the loop to persist between updates let lastUpdateTime = new Date(); let reasoningBuffer = ""; let reasoningLastUpdate = new Date(); for await (const update of messageUpdatesIterator) { if ($isAborted) { messageUpdatesAbortController.abort(); return; } // Remove null characters added due to remote keylogging prevention // See server code for more details if (update.type === MessageUpdateType.Stream) { update.token = update.token.replaceAll("\0", ""); } const isHighFrequencyUpdate = (update.type === MessageUpdateType.Reasoning && update.subtype === MessageReasoningUpdateType.Stream) || update.type === MessageUpdateType.Stream || (update.type === MessageUpdateType.Status && update.status === MessageUpdateStatus.KeepAlive); if (!isHighFrequencyUpdate) { messageToWriteTo.updates = [...(messageToWriteTo.updates ?? []), update]; } const currentTime = new Date(); if (update.type === MessageUpdateType.Stream && !$settings.disableStream) { buffer += update.token; // Check if this is the first update or if enough time has passed if (currentTime.getTime() - lastUpdateTime.getTime() > updateDebouncer.maxUpdateTime) { messageToWriteTo.content += buffer; buffer = ""; lastUpdateTime = currentTime; } pending = false; } else if ( update.type === MessageUpdateType.Status && update.status === MessageUpdateStatus.Error ) { $error = update.message ?? "An error has occurred"; } else if (update.type === MessageUpdateType.Title) { const convInData = conversations.find(({ id }) => id === page.params.id); if (convInData) { convInData.title = update.title; $titleUpdate = { title: update.title, convId: page.params.id, }; } } else if (update.type === MessageUpdateType.File) { messageToWriteTo.files = [ ...(messageToWriteTo.files ?? []), { type: "hash", value: update.sha, mime: update.mime, name: update.name }, ]; } else if (update.type === MessageUpdateType.Reasoning) { if (!messageToWriteTo.reasoning) { messageToWriteTo.reasoning = ""; } if (update.subtype === MessageReasoningUpdateType.Stream) { reasoningBuffer += update.token; if ( currentTime.getTime() - reasoningLastUpdate.getTime() > updateDebouncer.maxUpdateTime ) { messageToWriteTo.reasoning += reasoningBuffer; reasoningBuffer = ""; reasoningLastUpdate = currentTime; } } } } } catch (err) { if (err instanceof Error && err.message.includes("overloaded")) { $error = "Too much traffic, please try again."; } else if (err instanceof Error && err.message.includes("429")) { $error = ERROR_MESSAGES.rateLimited; } else if (err instanceof Error) { $error = err.message; } else { $error = ERROR_MESSAGES.default; } console.error(err); } finally { loading = false; pending = false; await invalidateAll(); } } async function voteMessage(score: Message["score"], messageId: string) { let conversationId = page.params.id; let oldScore: Message["score"] | undefined; // optimistic update to avoid waiting for the server messages = messages.map((message) => { if (message.id === messageId) { oldScore = message.score; return { ...message, score }; } return message; }); try { await fetch(`${base}/conversation/${conversationId}/message/${messageId}/vote`, { method: "POST", body: JSON.stringify({ score }), }); } catch { // revert score on any error messages = messages.map((message) => { return message.id !== messageId ? message : { ...message, score: oldScore }; }); } } onMount(async () => { // only used in case of creating new conversations (from the parent POST endpoint) if ($pendingMessage) { files = $pendingMessage.files; await writeMessage({ prompt: $pendingMessage.content }); $pendingMessage = undefined; } }); async function onMessage(event: CustomEvent<string>) { if (!data.shared) { await writeMessage({ prompt: event.detail }); } else { await convFromShared() .then(async (convId) => { await goto(`${base}/conversation/${convId}`, { invalidateAll: true }); }) .then(async () => await writeMessage({ prompt: event.detail })) .finally(() => (loading = false)); } } async function onRetry(event: CustomEvent<{ id: Message["id"]; content?: string }>) { const lastMsgId = event.detail.id; messagesPath = createMessagesPath(messages, lastMsgId); if (!data.shared) { await writeMessage({ prompt: event.detail.content, messageId: event.detail.id, isRetry: true, }); } else { await convFromShared() .then(async (convId) => { await goto(`${base}/conversation/${convId}`, { invalidateAll: true }); }) .then( async () => await writeMessage({ prompt: event.detail.content, messageId: event.detail.id, isRetry: true, }) ) .finally(() => (loading = false)); } } async function onShowAlternateMsg(event: CustomEvent<{ id: Message["id"] }>) { const msgId = event.detail.id; messagesPath = createMessagesPath(messages, msgId); } async function onContinue(event: CustomEvent<{ id: Message["id"] }>) { if (!data.shared) { await writeMessage({ messageId: event.detail.id, isContinue: true }); } else { await convFromShared() .then(async (convId) => { await goto(`${base}/conversation/${convId}`, { invalidateAll: true }); }) .then( async () => await writeMessage({ messageId: event.detail.id, isContinue: true, }) ) .finally(() => (loading = false)); } } const settings = useSettingsStore(); let messages = $state(data.messages); $effect(() => { messages = data.messages; }); let activeModel = $derived(findCurrentModel([...data.models, ...data.oldModels], data.model)); // create a linear list of `messagesPath` from `messages` that is a tree of threaded messages let messagesPath = $derived(createMessagesPath(messages)); let messagesAlternatives = $derived(createMessagesAlternatives(messages)); $effect(() => { if (browser && messagesPath.at(-1)?.id) { localStorage.setItem("leafId", messagesPath.at(-1)?.id as string); } }); beforeNavigate(() => { if (page.params.id) { $isAborted = true; loading = false; } }); let title = $derived( conversations.find((conv) => conv.id === page.params.id)?.title ?? data.title ); </script> <svelte:head> <title>{title}</title> </svelte:head> <ChatWindow {loading} {pending} messages={messagesPath as Message[]} {messagesAlternatives} shared={data.shared} preprompt={data.preprompt} bind:files on:message={onMessage} on:retry={onRetry} on:continue={onContinue} on:showAlternateMsg={onShowAlternateMsg} on:vote={(event) => voteMessage(event.detail.score, event.detail.id)} on:share={() => shareConversation(page.params.id, data.title)} on:stop={async () => { await fetch(`${base}/conversation/${page.params.id}/stop-generating`, { method: "POST", }).then((r) => { if (r.ok) { setTimeout(() => { $isAborted = true; loading = false; }, 3000); } else { $isAborted = true; loading = false; } }); }} models={data.models} currentModel={findCurrentModel([...data.models, ...data.oldModels], data.model)} assistant={data.assistant} />
chat-ui/src/routes/conversation/[id]/+page.svelte/0
{ "file_path": "chat-ui/src/routes/conversation/[id]/+page.svelte", "repo_id": "chat-ui", "token_count": 6099 }
96
import { base } from "$app/paths"; import { redirect } from "@sveltejs/kit"; export async function load({ params, parent, fetch }) { const r = await fetch(`${base}/api/v2/models/${params.model}/subscribe`, { method: "POST", }); if (!r.ok) { redirect(302, base + "/"); } return { settings: await parent().then((data) => ({ ...data.settings, activeModel: params.model, })), }; }
chat-ui/src/routes/models/[...model]/+page.ts/0
{ "file_path": "chat-ui/src/routes/models/[...model]/+page.ts", "repo_id": "chat-ui", "token_count": 153 }
97
<script lang="ts"> import AssistantSettings from "$lib/components/AssistantSettings.svelte"; import type { PageData } from "./$types"; interface Props { data: PageData; } let { data }: Props = $props(); </script> <AssistantSettings models={data.models} />
chat-ui/src/routes/settings/(nav)/assistants/new/+page.svelte/0
{ "file_path": "chat-ui/src/routes/settings/(nav)/assistants/new/+page.svelte", "repo_id": "chat-ui", "token_count": 87 }
98
repos: - repo: https://github.com/charliermarsh/ruff-pre-commit # https://github.com/charliermarsh/ruff#usage rev: 'v0.11.8' hooks: # Run the linter. - id: ruff args: [ --fix ] # Run the formatter. - id: ruff-format
datasets/.pre-commit-config.yaml/0
{ "file_path": "datasets/.pre-commit-config.yaml", "repo_id": "datasets", "token_count": 122 }
99
import json import sys def format_json_to_md(input_json_file, output_md_file): with open(input_json_file, encoding="utf-8") as f: results = json.load(f) output_md = ["<details>", "<summary>Show updated benchmarks!</summary>", " "] for benchmark_name in sorted(results): benchmark_res = results[benchmark_name] benchmark_file_name = benchmark_name.split("/")[-1] output_md.append(f"### Benchmark: {benchmark_file_name}") title = "| metric |" lines = "|--------|" value = "| new / old (diff) |" for metric_name in sorted(benchmark_res): metric_vals = benchmark_res[metric_name] new_val = metric_vals["new"] old_val = metric_vals.get("old", None) dif_val = metric_vals.get("diff", None) val_str = f" {new_val:f}" if isinstance(new_val, (int, float)) else "None" if old_val is not None: val_str += f" / {old_val:f}" if isinstance(old_val, (int, float)) else "None" if dif_val is not None: val_str += f" ({dif_val:f})" if isinstance(dif_val, (int, float)) else "None" title += " " + metric_name + " |" lines += "---|" value += val_str + " |" output_md += [title, lines, value, " "] output_md.append("</details>") with open(output_md_file, "w", encoding="utf-8") as f: f.writelines("\n".join(output_md)) if __name__ == "__main__": input_json_file = sys.argv[1] output_md_file = sys.argv[2] format_json_to_md(input_json_file, output_md_file)
datasets/benchmarks/format.py/0
{ "file_path": "datasets/benchmarks/format.py", "repo_id": "datasets", "token_count": 746 }
100
# Batch mapping Combining the utility of [`Dataset.map`] with batch mode is very powerful. It allows you to speed up processing, and freely control the size of the generated dataset. ## Need for speed The primary objective of batch mapping is to speed up processing. Often times, it is faster to work with batches of data instead of single examples. Naturally, batch mapping lends itself to tokenization. For example, the 🤗 [Tokenizers](https://huggingface.co/docs/tokenizers/python/latest/) library works faster with batches because it parallelizes the tokenization of all the examples in a batch. ## Input size != output size The ability to control the size of the generated dataset can be leveraged for many interesting use-cases. In the How-to [map](#map) section, there are examples of using batch mapping to: - Split long sentences into shorter chunks. - Augment a dataset with additional tokens. It is helpful to understand how this works, so you can come up with your own ways to use batch mapping. At this point, you may be wondering how you can control the size of the generated dataset. The answer is: **the mapped function does not have to return an output batch of the same size**. In other words, your mapped function input can be a batch of size `N` and return a batch of size `M`. The output `M` can be greater than or less than `N`. This means you can concatenate your examples, divide it up, and even add more examples! However, remember that all values in the output dictionary must contain the **same number of elements** as the other fields in the output dictionary. Otherwise, it is not possible to define the number of examples in the output returned by the mapped function. The number can vary between successive batches processed by the mapped function. For a single batch though, all values of the output dictionary should have the same length (i.e., the number of elements). For example, from a dataset of 1 column and 3 rows, if you use `map` to return a new column with twice as many rows, then you will have an error. In this case, you end up with one column with 3 rows, and one column with 6 rows. As you can see, the table will not be valid: ```py >>> from datasets import Dataset >>> dataset = Dataset.from_dict({"a": [0, 1, 2]}) >>> dataset.map(lambda batch: {"b": batch["a"] * 2}, batched=True) # new column with 6 elements: [0, 1, 2, 0, 1, 2] 'ArrowInvalid: Column 1 named b expected length 3 but got length 6' ``` To make it valid, you have to drop one of the columns: ```py >>> from datasets import Dataset >>> dataset = Dataset.from_dict({"a": [0, 1, 2]}) >>> dataset_with_duplicates = dataset.map(lambda batch: {"b": batch["a"] * 2}, remove_columns=["a"], batched=True) >>> len(dataset_with_duplicates) 6 ```
datasets/docs/source/about_map_batch.mdx/0
{ "file_path": "datasets/docs/source/about_map_batch.mdx", "repo_id": "datasets", "token_count": 722 }
101
# Image classification Image classification datasets are used to train a model to classify an entire image. There are a wide variety of applications enabled by these datasets such as identifying endangered wildlife species or screening for disease in medical images. This guide will show you how to apply transformations to an image classification dataset. Before you start, make sure you have up-to-date versions of `albumentations` and `cv2` installed: ```bash pip install -U albumentations opencv-python ``` This guide uses the [Beans](https://huggingface.co/datasets/beans) dataset for identifying the type of bean plant disease based on an image of its leaf. Load the dataset and take a look at an example: ```py >>> from datasets import load_dataset >>> dataset = load_dataset("AI-Lab-Makerere/beans") >>> dataset["train"][10] {'image': <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=500x500 at 0x7F8D2F4D7A10>, 'image_file_path': '/root/.cache/huggingface/datasets/downloads/extracted/b0a21163f78769a2cf11f58dfc767fb458fc7cea5c05dccc0144a2c0f0bc1292/train/angular_leaf_spot/angular_leaf_spot_train.204.jpg', 'labels': 0} ``` The dataset has three fields: * `image`: a PIL image object. * `image_file_path`: the path to the image file. * `labels`: the label or category of the image. Next, check out an image: <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/img_clf.png"> </div> Now apply some augmentations with `albumentations`. You'll randomly crop the image, flip it horizontally, and adjust its brightness. ```py >>> import cv2 >>> import albumentations >>> import numpy as np >>> transform = albumentations.Compose([ ... albumentations.RandomCrop(width=256, height=256), ... albumentations.HorizontalFlip(p=0.5), ... albumentations.RandomBrightnessContrast(p=0.2), ... ]) ``` Create a function to apply the transformation to the images: ```py >>> def transforms(examples): ... examples["pixel_values"] = [ ... transform(image=np.array(image))["image"] for image in examples["image"] ... ] ... ... return examples ``` Use the [`~Dataset.set_transform`] function to apply the transformation on-the-fly to batches of the dataset to consume less disk space: ```py >>> dataset.set_transform(transforms) ``` You can verify the transformation worked by indexing into the `pixel_values` of the first example: ```py >>> import numpy as np >>> import matplotlib.pyplot as plt >>> img = dataset["train"][0]["pixel_values"] >>> plt.imshow(img) ``` <div class="flex justify-center"> <img class="block dark:hidden" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/img_clf_aug.png"> <img class="hidden dark:block" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/img_clf_aug.png"/> </div> <Tip> Now that you know how to process a dataset for image classification, learn [how to train an image classification model](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/image_classification.ipynb) and use it for inference. </Tip>
datasets/docs/source/image_classification.mdx/0
{ "file_path": "datasets/docs/source/image_classification.mdx", "repo_id": "datasets", "token_count": 1051 }
102
# Table Classes Each `Dataset` object is backed by a PyArrow Table. A Table can be loaded from either the disk (memory mapped) or in memory. Several Table types are available, and they all inherit from [`table.Table`]. ## Table [[autodoc]] datasets.table.Table - validate - equals - to_batches - to_pydict - to_pandas - to_string - field - column - itercolumns - schema - columns - num_columns - num_rows - shape - nbytes ## InMemoryTable [[autodoc]] datasets.table.InMemoryTable - validate - equals - to_batches - to_pydict - to_pandas - to_string - field - column - itercolumns - schema - columns - num_columns - num_rows - shape - nbytes - column_names - slice - filter - flatten - combine_chunks - cast - replace_schema_metadata - add_column - append_column - remove_column - set_column - rename_columns - select - drop - from_file - from_buffer - from_pandas - from_arrays - from_pydict - from_batches ## MemoryMappedTable [[autodoc]] datasets.table.MemoryMappedTable - validate - equals - to_batches - to_pydict - to_pandas - to_string - field - column - itercolumns - schema - columns - num_columns - num_rows - shape - nbytes - column_names - slice - filter - flatten - combine_chunks - cast - replace_schema_metadata - add_column - append_column - remove_column - set_column - rename_columns - select - drop - from_file ## ConcatenationTable [[autodoc]] datasets.table.ConcatenationTable - validate - equals - to_batches - to_pydict - to_pandas - to_string - field - column - itercolumns - schema - columns - num_columns - num_rows - shape - nbytes - column_names - slice - filter - flatten - combine_chunks - cast - replace_schema_metadata - add_column - append_column - remove_column - set_column - rename_columns - select - drop - from_blocks - from_tables ## Utils [[autodoc]] datasets.table.concat_tables [[autodoc]] datasets.table.list_table_cache_files
datasets/docs/source/package_reference/table_classes.mdx/0
{ "file_path": "datasets/docs/source/package_reference/table_classes.mdx", "repo_id": "datasets", "token_count": 1029 }
103
# Use with Polars This document is a quick introduction to using `datasets` with Polars, with a particular focus on how to process datasets using Polars functions, and how to convert a dataset to Polars or from Polars. This is particularly useful as it allows fast zero-copy operations, since both `datasets` and Polars use Arrow under the hood. ## Dataset format By default, datasets return regular Python objects: integers, floats, strings, lists, etc. To get Polars DataFrames or Series instead, you can set the format of the dataset to `polars` using [`Dataset.with_format`]: ```py >>> from datasets import Dataset >>> data = {"col_0": ["a", "b", "c", "d"], "col_1": [0., 0., 1., 1.]} >>> ds = Dataset.from_dict(data) >>> ds = ds.with_format("polars") >>> ds[0] # pl.DataFrame shape: (1, 2) ┌───────┬───────┐ │ col_0 ┆ col_1 │ │ --- ┆ --- │ │ str ┆ f64 │ ╞═══════╪═══════╡ │ a ┆ 0.0 │ └───────┴───────┘ >>> ds[:2] # pl.DataFrame shape: (2, 2) ┌───────┬───────┐ │ col_0 ┆ col_1 │ │ --- ┆ --- │ │ str ┆ f64 │ ╞═══════╪═══════╡ │ a ┆ 0.0 │ │ b ┆ 0.0 │ └───────┴───────┘ >>> ds["data"] # pl.Series shape: (4,) Series: 'col_0' [str] [ "a" "b" "c" "d" ] ``` This also works for `IterableDataset` objects obtained e.g. using `load_dataset(..., streaming=True)`: ```py >>> ds = ds.with_format("polars") >>> for df in ds.iter(batch_size=2): ... print(df) ... break shape: (2, 2) ┌───────┬───────┐ │ col_0 ┆ col_1 │ │ --- ┆ --- │ │ str ┆ f64 │ ╞═══════╪═══════╡ │ a ┆ 0.0 │ │ b ┆ 0.0 │ └───────┴───────┘ ``` ## Process data Polars functions are generally faster than regular hand-written python functions, and therefore they are a good option to optimize data processing. You can use Polars functions to process a dataset in [`Dataset.map`] or [`Dataset.filter`]: ```python >>> import polars as pl >>> from datasets import Dataset >>> data = {"col_0": ["a", "b", "c", "d"], "col_1": [0., 0., 1., 1.]} >>> ds = Dataset.from_dict(data) >>> ds = ds.with_format("polars") >>> ds = ds.map(lambda df: df.with_columns(pl.col("col_1").add(1).alias("col_2")), batched=True) >>> ds[:2] shape: (2, 3) ┌───────┬───────┬───────┐ │ col_0 ┆ col_1 ┆ col_2 │ │ --- ┆ --- ┆ --- │ │ str ┆ f64 ┆ f64 │ ╞═══════╪═══════╪═══════╡ │ a ┆ 0.0 ┆ 1.0 │ │ b ┆ 0.0 ┆ 1.0 │ └───────┴───────┴───────┘ >>> ds = ds.filter(lambda df: df["col_0"] == "b", batched=True) >>> ds[0] shape: (1, 3) ┌───────┬───────┬───────┐ │ col_0 ┆ col_1 ┆ col_2 │ │ --- ┆ --- ┆ --- │ │ str ┆ f64 ┆ f64 │ ╞═══════╪═══════╪═══════╡ │ b ┆ 0.0 ┆ 1.0 │ └───────┴───────┴───────┘ ``` We use `batched=True` because it is faster to process batches of data in Polars rather than row by row. It's also possible to use `batch_size=` in `map()` to set the size of each `df`. This also works for [`IterableDataset.map`] and [`IterableDataset.filter`]. ### Example: data extraction Many functions are available in Polars and for any data type: string, floats, integers, etc. You can find the full list [here](https://docs.pola.rs/api/python/stable/reference/expressions/functions.html). Those functions are written in Rust and run on batches of data which enables fast data processing. Here is an example that shows a 5x speed boost using Polars instead of a regular python function to extract solutions from a LLM reasoning dataset: ```python from datasets import load_dataset ds = load_dataset("ServiceNow-AI/R1-Distill-SFT", "v0", split="train") # Using a regular python function pattern = re.compile("boxed\\{(.*)\\}") result_ds = ds.map(lambda x: {"value_solution": m.group(1) if (m:=pattern.search(x["solution"])) else None}) # Time: 10s # Using a Polars function expr = pl.col("solution").str.extract("boxed\\{(.*)\\}").alias("value_solution") result_ds = ds.with_format("polars").map(lambda df: df.with_columns(expr), batched=True) # Time: 2s ``` ## Import or Export from Polars To import data from Polars, you can use [`Dataset.from_polars`]: ```python ds = Dataset.from_polars(df) ``` And you can use [`Dataset.to_polars`] to export a Dataset to a Polars DataFrame: ```python df = Dataset.to_polars(ds) ```
datasets/docs/source/use_with_polars.mdx/0
{ "file_path": "datasets/docs/source/use_with_polars.mdx", "repo_id": "datasets", "token_count": 1829 }
104
from abc import ABC, abstractmethod from argparse import ArgumentParser class BaseDatasetsCLICommand(ABC): @staticmethod @abstractmethod def register_subcommand(parser: ArgumentParser): raise NotImplementedError() @abstractmethod def run(self): raise NotImplementedError()
datasets/src/datasets/commands/__init__.py/0
{ "file_path": "datasets/src/datasets/commands/__init__.py", "repo_id": "datasets", "token_count": 107 }
105
import os from dataclasses import dataclass, field from io import BytesIO from typing import TYPE_CHECKING, Any, ClassVar, Optional, Union import numpy as np import pyarrow as pa from .. import config from ..download.download_config import DownloadConfig from ..table import array_cast from ..utils.file_utils import is_local_path, xopen from ..utils.py_utils import no_op_if_value_is_null, string_to_dict if TYPE_CHECKING: from torchcodec.decoders import AudioDecoder from .features import FeatureType @dataclass class Audio: """Audio [`Feature`] to extract audio data from an audio file. Input: The Audio feature accepts as input: - A `str`: Absolute path to the audio file (i.e. random access is allowed). - A `dict` with the keys: - `path`: String with relative path of the audio file to the archive file. - `bytes`: Bytes content of the audio file. This is useful for parquet or webdataset files which embed audio files. - A `dict` with the keys: - `array`: Array containing the audio sample - `sampling_rate`: Integer corresponding to the sampling rate of the audio sample. - A `torchcodec.decoders.AudioDecoder`: torchcodec audio decoder object. Output: The Audio features output data as `torchcodec.decoders.AudioDecoder` objects, with additional keys: - `array`: Array containing the audio sample - `sampling_rate`: Integer corresponding to the sampling rate of the audio sample. Args: sampling_rate (`int`, *optional*): Target sampling rate. If `None`, the native sampling rate is used. mono (`bool`, defaults to `True`): Whether to convert the audio signal to mono by averaging samples across channels. decode (`bool`, defaults to `True`): Whether to decode the audio data. If `False`, returns the underlying dictionary in the format `{"path": audio_path, "bytes": audio_bytes}`. stream_index (`int`, *optional*): The streaming index to use from the file. If `None` defaults to the "best" index. Example: ```py >>> from datasets import load_dataset, Audio >>> ds = load_dataset("PolyAI/minds14", name="en-US", split="train") >>> ds = ds.cast_column("audio", Audio(sampling_rate=44100)) >>> ds[0]["audio"] <datasets.features._torchcodec.AudioDecoder object at 0x11642b6a0> >>> audio = ds[0]["audio"] >>> audio.get_samples_played_in_range(0, 10) AudioSamples: data (shape): torch.Size([2, 110592]) pts_seconds: 0.0 duration_seconds: 2.507755102040816 sample_rate: 44100 ``` """ sampling_rate: Optional[int] = None decode: bool = True stream_index: Optional[int] = None id: Optional[str] = field(default=None, repr=False) # Automatically constructed dtype: ClassVar[str] = "dict" pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()}) _type: str = field(default="Audio", init=False, repr=False) def __call__(self): return self.pa_type def encode_example(self, value: Union[str, bytes, bytearray, dict, "AudioDecoder"]) -> dict: """Encode example into a format for Arrow. Args: value (`str`, `bytes`,`bytearray`,`dict`, `AudioDecoder`): Data passed as input to Audio feature. Returns: `dict` """ try: import soundfile as sf # needed to write audio files except ImportError as err: raise ImportError("To support encoding audio data, please install 'soundfile'.") from err if value is None: raise ValueError("value must be provided") if config.TORCHCODEC_AVAILABLE: from torchcodec.decoders import AudioDecoder else: AudioDecoder = None if isinstance(value, str): return {"bytes": None, "path": value} elif isinstance(value, (bytes, bytearray)): return {"bytes": value, "path": None} elif AudioDecoder is not None and isinstance(value, AudioDecoder): return encode_torchcodec_audio(value) elif "array" in value: # convert the audio array to wav bytes buffer = BytesIO() sf.write(buffer, value["array"].T, value["sampling_rate"], format="wav") return {"bytes": buffer.getvalue(), "path": None} elif value.get("path") is not None and os.path.isfile(value["path"]): # we set "bytes": None to not duplicate the data if they're already available locally if value["path"].endswith("pcm"): # "PCM" only has raw audio bytes if value.get("sampling_rate") is None: # At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object") if value.get("bytes"): # If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!) bytes_value = np.frombuffer(value["bytes"], dtype=np.int16).astype(np.float32) / 32767 else: bytes_value = np.memmap(value["path"], dtype="h", mode="r").astype(np.float32) / 32767 buffer = BytesIO(b"") sf.write(buffer, bytes_value, value["sampling_rate"], format="wav") return {"bytes": buffer.getvalue(), "path": None} else: return {"bytes": None, "path": value.get("path")} elif value.get("bytes") is not None or value.get("path") is not None: # store the audio bytes, and path is used to infer the audio format using the file extension return {"bytes": value.get("bytes"), "path": value.get("path")} else: raise ValueError( f"An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}." ) def decode_example( self, value: dict, token_per_repo_id: Optional[dict[str, Union[str, bool, None]]] = None ) -> "AudioDecoder": """Decode example audio file into audio data. Args: value (`dict`): A dictionary with keys: - `path`: String with relative audio file path. - `bytes`: Bytes of the audio file. token_per_repo_id (`dict`, *optional*): To access and decode audio files from private repositories on the Hub, you can pass a dictionary repo_id (`str`) -> token (`bool` or `str`) Returns: `torchcodec.decoders.AudioDecoder` """ if config.TORCHCODEC_AVAILABLE: from ._torchcodec import AudioDecoder else: raise ImportError("To support decoding audio data, please install 'torchcodec'.") if not self.decode: raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead.") path, bytes = (value["path"], value["bytes"]) if value["bytes"] is not None else (value["path"], None) if path is None and bytes is None: raise ValueError(f"An audio sample should have one of 'path' or 'bytes' but both are None in {value}.") if bytes is None and is_local_path(path): audio = AudioDecoder(path, stream_index=self.stream_index, sample_rate=self.sampling_rate) elif bytes is None: token_per_repo_id = token_per_repo_id or {} source_url = path.split("::")[-1] pattern = ( config.HUB_DATASETS_URL if source_url.startswith(config.HF_ENDPOINT) else config.HUB_DATASETS_HFFS_URL ) source_url_fields = string_to_dict(source_url, pattern) token = token_per_repo_id.get(source_url_fields["repo_id"]) if source_url_fields is not None else None download_config = DownloadConfig(token=token) f = xopen(path, "rb", download_config=download_config) audio = AudioDecoder(f, stream_index=self.stream_index, sample_rate=self.sampling_rate) else: audio = AudioDecoder(bytes, stream_index=self.stream_index, sample_rate=self.sampling_rate) audio._hf_encoded = {"path": path, "bytes": bytes} audio.metadata.path = path return audio def flatten(self) -> Union["FeatureType", dict[str, "FeatureType"]]: """If in the decodable state, raise an error, otherwise flatten the feature into a dictionary.""" from .features import Value if self.decode: raise ValueError("Cannot flatten a decoded Audio feature.") return { "bytes": Value("binary"), "path": Value("string"), } def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray]) -> pa.StructArray: """Cast an Arrow array to the Audio arrow storage type. The Arrow types that can be converted to the Audio pyarrow storage type are: - `pa.string()` - it must contain the "path" data - `pa.binary()` - it must contain the audio bytes - `pa.struct({"bytes": pa.binary()})` - `pa.struct({"path": pa.string()})` - `pa.struct({"bytes": pa.binary(), "path": pa.string()})` - order doesn't matter Args: storage (`Union[pa.StringArray, pa.StructArray]`): PyArrow array to cast. Returns: `pa.StructArray`: Array in the Audio arrow storage type, that is `pa.struct({"bytes": pa.binary(), "path": pa.string()})` """ if pa.types.is_string(storage.type): bytes_array = pa.array([None] * len(storage), type=pa.binary()) storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null()) elif pa.types.is_binary(storage.type): path_array = pa.array([None] * len(storage), type=pa.string()) storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null()) elif pa.types.is_struct(storage.type) and storage.type.get_all_field_indices("array"): storage = pa.array( [Audio().encode_example(x) if x is not None else None for x in storage.to_numpy(zero_copy_only=False)] ) elif pa.types.is_struct(storage.type): if storage.type.get_field_index("bytes") >= 0: bytes_array = storage.field("bytes") else: bytes_array = pa.array([None] * len(storage), type=pa.binary()) if storage.type.get_field_index("path") >= 0: path_array = storage.field("path") else: path_array = pa.array([None] * len(storage), type=pa.string()) storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null()) return array_cast(storage, self.pa_type) def embed_storage(self, storage: pa.StructArray, token_per_repo_id=None) -> pa.StructArray: """Embed audio files into the Arrow array. Args: storage (`pa.StructArray`): PyArrow array to embed. Returns: `pa.StructArray`: Array in the Audio arrow storage type, that is `pa.struct({"bytes": pa.binary(), "path": pa.string()})`. """ if token_per_repo_id is None: token_per_repo_id = {} @no_op_if_value_is_null def path_to_bytes(path): source_url = path.split("::")[-1] pattern = ( config.HUB_DATASETS_URL if source_url.startswith(config.HF_ENDPOINT) else config.HUB_DATASETS_HFFS_URL ) source_url_fields = string_to_dict(source_url, pattern) token = token_per_repo_id.get(source_url_fields["repo_id"]) if source_url_fields is not None else None download_config = DownloadConfig(token=token) with xopen(path, "rb", download_config=download_config) as f: return f.read() bytes_array = pa.array( [ (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None for x in storage.to_pylist() ], type=pa.binary(), ) path_array = pa.array( [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()], type=pa.string(), ) storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null()) return array_cast(storage, self.pa_type) def encode_torchcodec_audio(audio: "AudioDecoder") -> dict: if hasattr(audio, "_hf_encoded"): return audio._hf_encoded else: try: import soundfile as sf # needed to write audio files except ImportError as err: raise ImportError("To support encoding audio data, please install 'soundfile'.") from err samples = audio.get_all_samples() array = samples.data.cpu().numpy() buffer = BytesIO() sf.write(buffer, array.T, samples.sample_rate, format="wav") return {"bytes": buffer.getvalue(), "path": None}
datasets/src/datasets/features/audio.py/0
{ "file_path": "datasets/src/datasets/features/audio.py", "repo_id": "datasets", "token_count": 5748 }
106
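The `Audio` feature above encodes a string path without touching the file and only decodes when an example is accessed. A minimal sketch of that behaviour using the public `datasets` API; `sample.wav` is a placeholder path, not taken from the source:

```python
from datasets import Audio, Dataset, Features, Value

# "sample.wav" is a placeholder; encoding a string path only records it,
# no file access happens until the example is decoded.
features = Features({"audio": Audio(), "label": Value("int64")})
ds = Dataset.from_dict({"audio": ["sample.wav"], "label": [0]}, features=features)

# Casting to Audio(decode=False) returns the raw {"bytes", "path"} storage dict
# instead of a torchcodec AudioDecoder, so no decoder dependency is needed here.
ds = ds.cast_column("audio", Audio(decode=False))
print(ds[0]["audio"])  # {'bytes': None, 'path': 'sample.wav'}
```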
from itertools import chain from typing import Optional, Union from huggingface_hub import ( CommitInfo, CommitOperationAdd, CommitOperationDelete, DatasetCard, DatasetCardData, HfApi, HfFileSystem, ) import datasets.config from datasets.info import DatasetInfosDict from datasets.load import load_dataset_builder from datasets.utils.metadata import MetadataConfigs def delete_from_hub( repo_id: str, config_name: str, revision: Optional[str] = None, token: Optional[Union[bool, str]] = None, ) -> CommitInfo: """Delete a dataset configuration from a [data-only dataset](repository_structure) on the Hub. Args: repo_id (`str`): ID of the Hub dataset repository, in the following format: `<user>/<dataset_name>` or `<org>/<dataset_name>`. config_name (`str`): Name of the dataset configuration. revision (`str`, *optional*): Branch to delete the configuration from. Defaults to the `"main"` branch. token (`bool` or `str`, *optional*): Authentication token for the Hugging Face Hub. Returns: `huggingface_hub.CommitInfo` """ operations = [] # data_files fs = HfFileSystem(endpoint=datasets.config.HF_ENDPOINT, token=token) builder = load_dataset_builder(repo_id, config_name, revision=revision, token=token) for data_file in chain(*builder.config.data_files.values()): data_file_resolved_path = fs.resolve_path(data_file) if data_file_resolved_path.repo_id == repo_id: operations.append(CommitOperationDelete(path_in_repo=data_file_resolved_path.path_in_repo)) # README.md dataset_card = DatasetCard.load(repo_id) # config_names if dataset_card.data.get("config_names", None) and config_name in dataset_card.data["config_names"]: dataset_card.data["config_names"].remove(config_name) # metadata_configs metadata_configs = MetadataConfigs.from_dataset_card_data(dataset_card.data) if metadata_configs: _ = metadata_configs.pop(config_name, None) dataset_card_data = DatasetCardData() metadata_configs.to_dataset_card_data(dataset_card_data) if datasets.config.METADATA_CONFIGS_FIELD in dataset_card_data: dataset_card.data[datasets.config.METADATA_CONFIGS_FIELD] = dataset_card_data[ datasets.config.METADATA_CONFIGS_FIELD ] else: _ = dataset_card.data.pop(datasets.config.METADATA_CONFIGS_FIELD, None) # dataset_info dataset_infos: DatasetInfosDict = DatasetInfosDict.from_dataset_card_data(dataset_card.data) if dataset_infos: _ = dataset_infos.pop(config_name, None) dataset_card_data = DatasetCardData() dataset_infos.to_dataset_card_data(dataset_card_data) if "dataset_info" in dataset_card_data: dataset_card.data["dataset_info"] = dataset_card_data["dataset_info"] else: _ = dataset_card.data.pop("dataset_info", None) # Commit operations.append( CommitOperationAdd(path_in_repo=datasets.config.REPOCARD_FILENAME, path_or_fileobj=str(dataset_card).encode()) ) api = HfApi(endpoint=datasets.config.HF_ENDPOINT, token=token) commit_info = api.create_commit( repo_id, operations=operations, commit_message=f"Delete '{config_name}' config", commit_description=f"Delete '{config_name}' config.", token=token, repo_type="dataset", revision=revision, create_pr=True, ) print(f"You can find your PR to delete the dataset config at: {commit_info.pr_url}") return commit_info def _delete_files(dataset_id, revision=None, token=None): hf_api = HfApi(endpoint=datasets.config.HF_ENDPOINT, token=token) repo_files = hf_api.list_repo_files( dataset_id, repo_type="dataset", ) if repo_files: legacy_json_file = [] data_files = [] for filename in repo_files: if filename in {".gitattributes", "README.md"}: continue elif filename == "dataset_infos.json": 
legacy_json_file.append(filename) else: data_files.append(filename) if legacy_json_file: hf_api.delete_file( "dataset_infos.json", dataset_id, repo_type="dataset", revision=revision, commit_message="Delete legacy dataset_infos.json", ) if data_files: for filename in data_files: hf_api.delete_file( filename, dataset_id, repo_type="dataset", revision=revision, commit_message="Delete data file", )
datasets/src/datasets/hub.py/0
{ "file_path": "datasets/src/datasets/hub.py", "repo_id": "datasets", "token_count": 2184 }
107
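A short sketch of how `delete_from_hub` from the module above might be called; the repository id, config name and token below are placeholders. Note that the function opens a pull request rather than committing to `main` directly, since it calls `create_commit` with `create_pr=True`:

```python
from datasets.hub import delete_from_hub

commit_info = delete_from_hub(
    "username/my_dataset",  # hypothetical repository id
    "legacy_config",        # hypothetical config name to remove
    token="hf_xxx",         # hypothetical token with write access
)
print(commit_info.pr_url)   # URL of the pull request that removes the config
```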
import inspect import re from typing import Dict, List, Tuple from huggingface_hub.utils import insecure_hashlib from .arrow import arrow from .audiofolder import audiofolder from .cache import cache from .csv import csv from .hdf5 import hdf5 from .imagefolder import imagefolder from .json import json from .pandas import pandas from .parquet import parquet from .pdffolder import pdffolder from .sql import sql from .text import text from .videofolder import videofolder from .webdataset import webdataset from .xml import xml def _hash_python_lines(lines: list[str]) -> str: filtered_lines = [] for line in lines: line = re.sub(r"#.*", "", line) # remove comments if line: filtered_lines.append(line) full_str = "\n".join(filtered_lines) # Make a hash from all this code full_bytes = full_str.encode("utf-8") return insecure_hashlib.sha256(full_bytes).hexdigest() # get importable module names and hash for caching _PACKAGED_DATASETS_MODULES = { "csv": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())), "json": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())), "pandas": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())), "parquet": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())), "arrow": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())), "text": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())), "imagefolder": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())), "audiofolder": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())), "videofolder": (videofolder.__name__, _hash_python_lines(inspect.getsource(videofolder).splitlines())), "pdffolder": (pdffolder.__name__, _hash_python_lines(inspect.getsource(pdffolder).splitlines())), "webdataset": (webdataset.__name__, _hash_python_lines(inspect.getsource(webdataset).splitlines())), "xml": (xml.__name__, _hash_python_lines(inspect.getsource(xml).splitlines())), "hdf5": (hdf5.__name__, _hash_python_lines(inspect.getsource(hdf5).splitlines())), } # get importable module names and hash for caching _PACKAGED_DATASETS_MODULES_2_15_HASHES = { "csv": "eea64c71ca8b46dd3f537ed218fc9bf495d5707789152eb2764f5c78fa66d59d", "json": "8bb11242116d547c741b2e8a1f18598ffdd40a1d4f2a2872c7a28b697434bc96", "pandas": "3ac4ffc4563c796122ef66899b9485a3f1a977553e2d2a8a318c72b8cc6f2202", "parquet": "ca31c69184d9832faed373922c2acccec0b13a0bb5bbbe19371385c3ff26f1d1", "arrow": "74f69db2c14c2860059d39860b1f400a03d11bf7fb5a8258ca38c501c878c137", "text": "c4a140d10f020282918b5dd1b8a49f0104729c6177f60a6b49ec2a365ec69f34", "imagefolder": "7b7ce5247a942be131d49ad4f3de5866083399a0f250901bd8dc202f8c5f7ce5", "audiofolder": "d3c1655c66c8f72e4efb5c79e952975fa6e2ce538473a6890241ddbddee9071c", } # Used to infer the module to use based on the data files extensions _EXTENSION_TO_MODULE: dict[str, tuple[str, dict]] = { ".csv": ("csv", {}), ".tsv": ("csv", {"sep": "\t"}), ".json": ("json", {}), ".jsonl": ("json", {}), # ndjson is no longer maintained (see: https://github.com/ndjson/ndjson-spec/issues/35#issuecomment-1285673417) ".ndjson": ("json", {}), ".parquet": ("parquet", {}), ".geoparquet": ("parquet", {}), ".gpq": ("parquet", {}), ".arrow": ("arrow", {}), ".txt": ("text", {}), ".tar": ("webdataset", {}), ".xml": ("xml", {}), ".hdf5": ("hdf5", {}), ".h5": ("hdf5", {}), } _EXTENSION_TO_MODULE.update({ext: ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS}) 
_EXTENSION_TO_MODULE.update({ext.upper(): ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS}) _EXTENSION_TO_MODULE.update({ext: ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS}) _EXTENSION_TO_MODULE.update({ext.upper(): ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS}) _EXTENSION_TO_MODULE.update({ext: ("videofolder", {}) for ext in videofolder.VideoFolder.EXTENSIONS}) _EXTENSION_TO_MODULE.update({ext.upper(): ("videofolder", {}) for ext in videofolder.VideoFolder.EXTENSIONS}) _EXTENSION_TO_MODULE.update({ext: ("pdffolder", {}) for ext in pdffolder.PdfFolder.EXTENSIONS}) _EXTENSION_TO_MODULE.update({ext.upper(): ("pdffolder", {}) for ext in pdffolder.PdfFolder.EXTENSIONS}) # Used to filter data files based on extensions given a module name _MODULE_TO_EXTENSIONS: dict[str, list[str]] = {} for _ext, (_module, _) in _EXTENSION_TO_MODULE.items(): _MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext) for _module in _MODULE_TO_EXTENSIONS: _MODULE_TO_EXTENSIONS[_module].append(".zip") # Used to filter data files based on file names _MODULE_TO_METADATA_FILE_NAMES: Dict[str, List[str]] = {} for _module in _MODULE_TO_EXTENSIONS: _MODULE_TO_METADATA_FILE_NAMES[_module] = [] _MODULE_TO_METADATA_FILE_NAMES["imagefolder"] = imagefolder.ImageFolder.METADATA_FILENAMES _MODULE_TO_METADATA_FILE_NAMES["audiofolder"] = imagefolder.ImageFolder.METADATA_FILENAMES _MODULE_TO_METADATA_FILE_NAMES["videofolder"] = imagefolder.ImageFolder.METADATA_FILENAMES _MODULE_TO_METADATA_FILE_NAMES["pdffolder"] = imagefolder.ImageFolder.METADATA_FILENAMES
datasets/src/datasets/packaged_modules/__init__.py/0
{ "file_path": "datasets/src/datasets/packaged_modules/__init__.py", "repo_id": "datasets", "token_count": 2142 }
108
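Two small illustrations of the module above, offered as a sketch rather than documented usage: `_EXTENSION_TO_MODULE` maps a file extension to a packaged builder plus default kwargs, and `_hash_python_lines` strips comments and blank lines so purely cosmetic edits keep the same cache hash.

```python
from datasets.packaged_modules import _EXTENSION_TO_MODULE, _hash_python_lines

module_name, default_kwargs = _EXTENSION_TO_MODULE[".tsv"]
print(module_name, default_kwargs)  # csv {'sep': '\t'}

# Comments and empty lines are removed before hashing, so these two match.
h1 = _hash_python_lines(["x = 1", "# a comment"])
h2 = _hash_python_lines(["x = 1", ""])
print(h1 == h2)  # True
```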
import datasets from ..folder_based_builder import folder_based_builder logger = datasets.utils.logging.get_logger(__name__) class ImageFolderConfig(folder_based_builder.FolderBasedBuilderConfig): """BuilderConfig for ImageFolder.""" drop_labels: bool = None drop_metadata: bool = None def __post_init__(self): super().__post_init__() class ImageFolder(folder_based_builder.FolderBasedBuilder): BASE_FEATURE = datasets.Image BASE_COLUMN_NAME = "image" BUILDER_CONFIG_CLASS = ImageFolderConfig EXTENSIONS: list[str] # definition at the bottom of the script # Obtained with: # ``` # import PIL.Image # IMAGE_EXTENSIONS = [] # PIL.Image.init() # for ext, format in PIL.Image.EXTENSION.items(): # if format in PIL.Image.OPEN: # IMAGE_EXTENSIONS.append(ext[1:]) # ``` # We intentionally do not run this code on launch because: # (1) Pillow is an optional dependency, so importing Pillow in global namespace is not allowed # (2) To ensure the list of supported extensions is deterministic IMAGE_EXTENSIONS = [ ".blp", ".bmp", ".dib", ".bufr", ".cur", ".pcx", ".dcx", ".dds", ".ps", ".eps", ".fit", ".fits", ".fli", ".flc", ".ftc", ".ftu", ".gbr", ".gif", ".grib", # ".h5", # may contain zero or several images # ".hdf", # may contain zero or several images ".png", ".apng", ".jp2", ".j2k", ".jpc", ".jpf", ".jpx", ".j2c", ".icns", ".ico", ".im", ".iim", ".tif", ".tiff", ".jfif", ".jpe", ".jpg", ".jpeg", ".mpg", ".mpeg", ".msp", ".pcd", ".pxr", ".pbm", ".pgm", ".ppm", ".pnm", ".psd", ".bw", ".rgb", ".rgba", ".sgi", ".ras", ".tga", ".icb", ".vda", ".vst", ".webp", ".wmf", ".emf", ".xbm", ".xpm", ] ImageFolder.EXTENSIONS = IMAGE_EXTENSIONS
datasets/src/datasets/packaged_modules/imagefolder/imagefolder.py/0
{ "file_path": "datasets/src/datasets/packaged_modules/imagefolder/imagefolder.py", "repo_id": "datasets", "token_count": 887 }
109
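The `ImageFolder` builder above is normally reached through `load_dataset`; a hedged sketch assuming a hypothetical directory of class-named subfolders (one subfolder per label):

```python
from datasets import load_dataset

# "path/to/images" is a placeholder for a layout such as
#   path/to/images/cat/1.png, path/to/images/dog/2.png, ...
ds = load_dataset("imagefolder", data_dir="path/to/images", split="train")
print(ds.features)  # typically {'image': Image(...), 'label': ClassLabel(...)}
```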
import datasets from ..folder_based_builder import folder_based_builder logger = datasets.utils.logging.get_logger(__name__) class VideoFolderConfig(folder_based_builder.FolderBasedBuilderConfig): """BuilderConfig for VideoFolder.""" drop_labels: bool = None drop_metadata: bool = None def __post_init__(self): super().__post_init__() class VideoFolder(folder_based_builder.FolderBasedBuilder): BASE_FEATURE = datasets.Video BASE_COLUMN_NAME = "video" BUILDER_CONFIG_CLASS = VideoFolderConfig EXTENSIONS: list[str] # definition at the bottom of the script # TODO: initial list, we should check the compatibility of other formats VIDEO_EXTENSIONS = [ ".mkv", ".mp4", ".avi", ".mpeg", ".mov", ] VideoFolder.EXTENSIONS = VIDEO_EXTENSIONS
datasets/src/datasets/packaged_modules/videofolder/videofolder.py/0
{ "file_path": "datasets/src/datasets/packaged_modules/videofolder/videofolder.py", "repo_id": "datasets", "token_count": 279 }
110
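A quick check of the extension list defined above; the import path follows the file location shown in this entry:

```python
from datasets.packaged_modules.videofolder.videofolder import VideoFolder

print(".mp4" in VideoFolder.EXTENSIONS)  # True
print(".wav" in VideoFolder.EXTENSIONS)  # False
```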
import enum import inspect import warnings from functools import wraps from typing import Callable, Optional from .logging import get_logger _emitted_deprecation_warnings = set() logger = get_logger(__name__) def deprecated(help_message: Optional[str] = None): """Decorator to mark a class or a function as deprecated. Args: help_message (:obj:`str`, optional): An optional message to guide the user on how to switch to non-deprecated usage of the library. """ def decorator(deprecated_class_or_function: Callable): global _emitted_deprecation_warnings if inspect.isclass(deprecated_class_or_function): deprecated_function = deprecated_class_or_function.__init__ name = deprecated_class_or_function.__name__ else: deprecated_function = deprecated_class_or_function name = deprecated_function.__name__ # Support deprecating __init__ class method: class name instead name = name if name != "__init__" else deprecated_function.__qualname__.split(".")[-2] warning_msg = ( f"{name} is deprecated and will be removed in the next major version of datasets." + f" {help_message}" if help_message else "" ) @wraps(deprecated_function) def wrapper(*args, **kwargs): func_hash = hash(deprecated_function) if func_hash not in _emitted_deprecation_warnings: warnings.warn(warning_msg, category=FutureWarning, stacklevel=2) _emitted_deprecation_warnings.add(func_hash) return deprecated_function(*args, **kwargs) wrapper._decorator_name_ = "deprecated" if inspect.isclass(deprecated_class_or_function): deprecated_class_or_function.__init__ = wrapper return deprecated_class_or_function else: return wrapper return decorator class OnAccess(enum.EnumMeta): """ Enum metaclass that calls a user-specified function whenever a member is accessed. """ def __getattribute__(cls, name): obj = super().__getattribute__(name) if isinstance(obj, enum.Enum) and obj._on_access: obj._on_access() return obj def __getitem__(cls, name): member = super().__getitem__(name) if member._on_access: member._on_access() return member def __call__(cls, value, names=None, *, module=None, qualname=None, type=None, start=1): obj = super().__call__(value, names, module=module, qualname=qualname, type=type, start=start) if isinstance(obj, enum.Enum) and obj._on_access: obj._on_access() return obj class DeprecatedEnum(enum.Enum, metaclass=OnAccess): """ Enum class that calls `deprecate` method whenever a member is accessed. """ def __new__(cls, value): member = object.__new__(cls) member._value_ = value member._on_access = member.deprecate return member @property def help_message(self): return "" def deprecate(self): help_message = f" {self.help_message}" if self.help_message else "" warnings.warn( f"'{self.__objclass__.__name__}' is deprecated and will be removed in the next major version of datasets." + help_message, FutureWarning, stacklevel=3, )
datasets/src/datasets/utils/deprecation_utils.py/0
{ "file_path": "datasets/src/datasets/utils/deprecation_utils.py", "repo_id": "datasets", "token_count": 1426 }
111
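A sketch of the `deprecated` decorator defined above: the warning is a `FutureWarning` and, because emitted function hashes are remembered in `_emitted_deprecation_warnings`, it fires only once per wrapped function. The helper names below are hypothetical.

```python
import warnings

from datasets.utils.deprecation_utils import deprecated


@deprecated(help_message="Use new_helper instead.")
def old_helper(x):
    return x * 2


with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    old_helper(3)
    old_helper(3)  # the second call does not warn again

print(len(caught))                                     # 1
print(issubclass(caught[0].category, FutureWarning))   # True
```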
name: "" # Filename comes here allow_empty: false allow_empty_text: true subsections: - name: "Dataset Card for X" # First-level markdown heading allow_empty: false allow_empty_text: true subsections: - name: "Table of Contents" allow_empty: false allow_empty_text: false subsections: null # meaning it should not be checked. - name: "Dataset Description" allow_empty: false allow_empty_text: false subsections: - name: "Dataset Summary" allow_empty: false allow_empty_text: false subsections: null - name: "Supported Tasks and Leaderboards" allow_empty: true allow_empty_text: true subsections: null - name: Languages allow_empty: true allow_empty_text: true subsections: null - name: "Dataset Structure" allow_empty: false allow_empty_text: true subsections: - name: "Data Instances" allow_empty: false allow_empty_text: true subsections: null - name: "Data Fields" allow_empty: false allow_empty_text: true subsections: null - name: "Data Splits" allow_empty: false allow_empty_text: true subsections: null - name: "Dataset Creation" allow_empty: false allow_empty_text: true subsections: - name: "Curation Rationale" allow_empty: true allow_empty_text: true subsections: null - name: "Source Data" allow_empty: false allow_empty_text: true subsections: - name: "Initial Data Collection and Normalization" allow_empty: true allow_empty_text: true subsections: null - name: "Who are the source language producers?" allow_empty: true allow_empty_text: true subsections: null - name: "Annotations" allow_empty: false allow_empty_text: true subsections: - name: "Annotation process" allow_empty: true allow_empty_text: true subsections: null - name: "Who are the annotators?" allow_empty: true allow_empty_text: true subsections: null - name: "Personal and Sensitive Information" allow_empty: true allow_empty_text: true subsections: null - name: "Considerations for Using the Data" allow_empty: true allow_empty_text: true subsections: - name: "Social Impact of Dataset" allow_empty: true allow_empty_text: true subsections: null - name: "Discussion of Biases" allow_empty: true allow_empty_text: true subsections: null - name: "Other Known Limitations" allow_empty: true allow_empty_text: true subsections: null - name: "Additional Information" allow_empty: true allow_empty_text: true subsections: - name: "Dataset Curators" allow_empty: true allow_empty_text: true subsections: null - name: "Licensing Information" allow_empty: true allow_empty_text: true subsections: null - name: "Citation Information" allow_empty: false allow_empty_text: true subsections: null - name: "Contributions" allow_empty: false allow_empty_text: false subsections: null
datasets/src/datasets/utils/resources/readme_structure.yaml/0
{ "file_path": "datasets/src/datasets/utils/resources/readme_structure.yaml", "repo_id": "datasets", "token_count": 1924 }
112
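The structure file above is plain YAML, so a validator can load it and walk the nested `subsections`; a sketch assuming the file is available locally under its repository filename:

```python
import yaml

with open("readme_structure.yaml") as f:
    structure = yaml.safe_load(f)

root = structure["subsections"][0]
print(root["name"])  # Dataset Card for X
print([section["name"] for section in root["subsections"]][:2])
# ['Table of Contents', 'Dataset Description']
```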
import pytest import datasets import datasets.config # Import fixture modules as plugins pytest_plugins = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"] def pytest_collection_modifyitems(config, items): # Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit") for item in items: if any(marker in item.keywords for marker in ["integration", "unit"]): continue item.add_marker(pytest.mark.unit) @pytest.fixture(autouse=True) def set_test_cache_config(tmp_path_factory, monkeypatch): # test_hf_cache_home = tmp_path_factory.mktemp("cache") # TODO: why a cache dir per test function does not work? test_hf_cache_home = tmp_path_factory.getbasetemp() / "cache" test_hf_datasets_cache = test_hf_cache_home / "datasets" monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE", str(test_hf_datasets_cache)) test_downloaded_datasets_path = test_hf_datasets_cache / "downloads" monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH", str(test_downloaded_datasets_path)) test_extracted_datasets_path = test_hf_datasets_cache / "downloads" / "extracted" monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(test_extracted_datasets_path)) @pytest.fixture(autouse=True) def disable_implicit_token(monkeypatch): monkeypatch.setattr("huggingface_hub.constants.HF_HUB_DISABLE_IMPLICIT_TOKEN", True) @pytest.fixture(autouse=True, scope="session") def disable_tqdm_output(): datasets.disable_progress_bar() @pytest.fixture(autouse=True) def set_update_download_counts_to_false(monkeypatch): # don't take tests into account when counting downloads monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS", False) @pytest.fixture def set_sqlalchemy_silence_uber_warning(monkeypatch): # Required to suppress RemovedIn20Warning when feature(s) are not compatible with SQLAlchemy 2.0 # To be removed once SQLAlchemy 2.0 supported try: monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING", True) except (ModuleNotFoundError, AttributeError): pass @pytest.fixture(autouse=True, scope="session") def zero_time_out_for_remote_code(): datasets.config.TIME_OUT_REMOTE_CODE = 0
datasets/tests/conftest.py/0
{ "file_path": "datasets/tests/conftest.py", "repo_id": "datasets", "token_count": 853 }
113
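Given the collection hook above, any test without an explicit marker is collected as a unit test, while integration tests opt in with `pytest.mark.integration`; a minimal illustration with hypothetical test names:

```python
import pytest


def test_fast_path():
    # no marker: the hook in conftest.py adds pytest.mark.unit automatically
    assert 1 + 1 == 2


@pytest.mark.integration
def test_talks_to_the_hub():
    # explicitly marked, so it is excluded when running only "-m unit"
    ...
```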
import pytest from datasets import Dataset, Features, Pdf from ..utils import require_pdfplumber @require_pdfplumber @pytest.mark.parametrize( "build_example", [ lambda pdf_path: pdf_path, lambda pdf_path: open(pdf_path, "rb").read(), lambda pdf_path: {"path": pdf_path}, lambda pdf_path: {"path": pdf_path, "bytes": None}, lambda pdf_path: {"path": pdf_path, "bytes": open(pdf_path, "rb").read()}, lambda pdf_path: {"path": None, "bytes": open(pdf_path, "rb").read()}, lambda pdf_path: {"bytes": open(pdf_path, "rb").read()}, ], ) def test_pdf_feature_encode_example(shared_datadir, build_example): import pdfplumber pdf_path = str(shared_datadir / "test_pdf.pdf") pdf = Pdf() encoded_example = pdf.encode_example(build_example(pdf_path)) assert isinstance(encoded_example, dict) assert encoded_example.keys() == {"bytes", "path"} assert encoded_example["bytes"] is not None or encoded_example["path"] is not None decoded_example = pdf.decode_example(encoded_example) assert isinstance(decoded_example, pdfplumber.pdf.PDF) @require_pdfplumber def test_dataset_with_pdf_feature(shared_datadir): import pdfplumber pdf_path = str(shared_datadir / "test_pdf.pdf") data = {"pdf": [pdf_path]} features = Features({"pdf": Pdf()}) dset = Dataset.from_dict(data, features=features) item = dset[0] assert item.keys() == {"pdf"} assert isinstance(item["pdf"], pdfplumber.pdf.PDF) batch = dset[:1] assert len(batch) == 1 assert batch.keys() == {"pdf"} assert isinstance(batch["pdf"], list) and all(isinstance(item, pdfplumber.pdf.PDF) for item in batch["pdf"]) column = dset["pdf"] assert len(column) == 1 assert isinstance(column, list) and all(isinstance(item, pdfplumber.pdf.PDF) for item in column) # from bytes with open(pdf_path, "rb") as f: data = {"pdf": [f.read()]} dset = Dataset.from_dict(data, features=features) item = dset[0] assert item.keys() == {"pdf"} assert isinstance(item["pdf"], pdfplumber.pdf.PDF)
datasets/tests/features/test_pdf.py/0
{ "file_path": "datasets/tests/features/test_pdf.py", "repo_id": "datasets", "token_count": 826 }
114
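Mirroring the first test above, encoding a string path with the `Pdf` feature yields the `{"bytes", "path"}` storage dict. This sketch assumes that, like the other media features, encoding a path does not read the file; `doc.pdf` is a placeholder.

```python
from datasets import Pdf

encoded = Pdf().encode_example("doc.pdf")
print(sorted(encoded))   # ['bytes', 'path']
print(encoded["path"])   # doc.pdf
```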
import pyarrow as pa import pytest from datasets.builder import InvalidConfigName from datasets.data_files import DataFilesList from datasets.packaged_modules.arrow.arrow import Arrow, ArrowConfig @pytest.fixture def arrow_file_streaming_format(tmp_path): filename = tmp_path / "stream.arrow" testdata = [[1, 1, 1], [0, 100, 6], [1, 90, 900]] schema = pa.schema([pa.field("input_ids", pa.list_(pa.int32()))]) array = pa.array(testdata, type=pa.list_(pa.int32())) table = pa.Table.from_arrays([array], schema=schema) with open(filename, "wb") as f: with pa.ipc.new_stream(f, schema) as writer: writer.write_table(table) return str(filename) @pytest.fixture def arrow_file_file_format(tmp_path): filename = tmp_path / "file.arrow" testdata = [[1, 1, 1], [0, 100, 6], [1, 90, 900]] schema = pa.schema([pa.field("input_ids", pa.list_(pa.int32()))]) array = pa.array(testdata, type=pa.list_(pa.int32())) table = pa.Table.from_arrays([array], schema=schema) with open(filename, "wb") as f: with pa.ipc.new_file(f, schema) as writer: writer.write_table(table) return str(filename) @pytest.mark.parametrize( "file_fixture, config_kwargs", [ ("arrow_file_streaming_format", {}), ("arrow_file_file_format", {}), ], ) def test_arrow_generate_tables(file_fixture, config_kwargs, request): arrow = Arrow(**config_kwargs) generator = arrow._generate_tables([[request.getfixturevalue(file_fixture)]]) pa_table = pa.concat_tables([table for _, table in generator]) expected = {"input_ids": [[1, 1, 1], [0, 100, 6], [1, 90, 900]]} assert pa_table.to_pydict() == expected def test_config_raises_when_invalid_name() -> None: with pytest.raises(InvalidConfigName, match="Bad characters"): _ = ArrowConfig(name="name-with-*-invalid-character") @pytest.mark.parametrize("data_files", ["str_path", ["str_path"], DataFilesList(["str_path"], [()])]) def test_config_raises_when_invalid_data_files(data_files) -> None: with pytest.raises(ValueError, match="Expected a DataFilesDict"): _ = ArrowConfig(name="name", data_files=data_files)
datasets/tests/packaged_modules/test_arrow.py/0
{ "file_path": "datasets/tests/packaged_modules/test_arrow.py", "repo_id": "datasets", "token_count": 873 }
115
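Outside the tests, the `arrow` packaged module is usually reached through `load_dataset` with `.arrow` data files; a sketch with a hypothetical file path:

```python
from datasets import load_dataset

ds = load_dataset("arrow", data_files={"train": "data/file.arrow"}, split="train")
print(ds.column_names)
```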
import copy import os import tempfile from unittest import TestCase from unittest.mock import patch import numpy as np import pyarrow as pa import pyarrow.parquet as pq import pytest from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence from datasets.features import Array2D, ClassLabel, Features, Image, Value from datasets.features.features import Array2DExtensionType, cast_to_python_objects from datasets.keyhash import DuplicatedKeysError, InvalidKeyError from .utils import require_pil class TypedSequenceTest(TestCase): def test_no_type(self): arr = pa.array(TypedSequence([1, 2, 3])) self.assertEqual(arr.type, pa.int64()) def test_array_type_forbidden(self): with self.assertRaises(ValueError): _ = pa.array(TypedSequence([1, 2, 3]), type=pa.int64()) def test_try_type_and_type_forbidden(self): with self.assertRaises(ValueError): _ = pa.array(TypedSequence([1, 2, 3], try_type=Value("bool"), type=Value("int64"))) def test_compatible_type(self): arr = pa.array(TypedSequence([1, 2, 3], type=Value("int32"))) self.assertEqual(arr.type, pa.int32()) def test_incompatible_type(self): with self.assertRaises((TypeError, pa.lib.ArrowInvalid)): _ = pa.array(TypedSequence(["foo", "bar"], type=Value("int64"))) def test_try_compatible_type(self): arr = pa.array(TypedSequence([1, 2, 3], try_type=Value("int32"))) self.assertEqual(arr.type, pa.int32()) def test_try_incompatible_type(self): arr = pa.array(TypedSequence(["foo", "bar"], try_type=Value("int64"))) self.assertEqual(arr.type, pa.string()) def test_compatible_extension_type(self): arr = pa.array(TypedSequence([[[1, 2, 3]]], type=Array2D((1, 3), "int64"))) self.assertEqual(arr.type, Array2DExtensionType((1, 3), "int64")) def test_incompatible_extension_type(self): with self.assertRaises((TypeError, pa.lib.ArrowInvalid)): _ = pa.array(TypedSequence(["foo", "bar"], type=Array2D((1, 3), "int64"))) def test_try_compatible_extension_type(self): arr = pa.array(TypedSequence([[[1, 2, 3]]], try_type=Array2D((1, 3), "int64"))) self.assertEqual(arr.type, Array2DExtensionType((1, 3), "int64")) def test_try_incompatible_extension_type(self): arr = pa.array(TypedSequence(["foo", "bar"], try_type=Array2D((1, 3), "int64"))) self.assertEqual(arr.type, pa.string()) @require_pil def test_exhaustive_cast(self): import PIL.Image pil_image = PIL.Image.fromarray(np.arange(10, dtype=np.uint8).reshape(2, 5)) with patch( "datasets.arrow_writer.cast_to_python_objects", side_effect=cast_to_python_objects ) as mock_cast_to_python_objects: _ = pa.array(TypedSequence([{"path": None, "bytes": b"image_bytes"}, pil_image], type=Image())) args, kwargs = mock_cast_to_python_objects.call_args_list[-1] self.assertIn("optimize_list_casting", kwargs) self.assertFalse(kwargs["optimize_list_casting"]) def _check_output(output, expected_num_chunks: int): stream = pa.BufferReader(output) if isinstance(output, pa.Buffer) else pa.memory_map(output) f = pa.ipc.open_stream(stream) pa_table: pa.Table = f.read_all() assert len(pa_table.to_batches()) == expected_num_chunks assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]} del pa_table @pytest.mark.parametrize("writer_batch_size", [None, 1, 10]) @pytest.mark.parametrize( "fields", [ None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}, {"col_2": pa.int64(), "col_1": pa.string()}, ], ) def test_write(fields, writer_batch_size): output = pa.BufferOutputStream() schema = pa.schema(fields) if fields else None with ArrowWriter(stream=output, 
schema=schema, writer_batch_size=writer_batch_size) as writer: writer.write({"col_1": "foo", "col_2": 1}) writer.write({"col_1": "bar", "col_2": 2}) num_examples, num_bytes = writer.finalize() assert num_examples == 2 assert num_bytes > 0 if not fields: fields = {"col_1": pa.string(), "col_2": pa.int64()} assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata) _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1) def test_write_with_features(): output = pa.BufferOutputStream() features = Features({"labels": ClassLabel(names=["neg", "pos"])}) with ArrowWriter(stream=output, features=features) as writer: writer.write({"labels": 0}) writer.write({"labels": 1}) num_examples, num_bytes = writer.finalize() assert num_examples == 2 assert num_bytes > 0 assert writer._schema == features.arrow_schema assert writer._schema.metadata == features.arrow_schema.metadata stream = pa.BufferReader(output.getvalue()) f = pa.ipc.open_stream(stream) pa_table: pa.Table = f.read_all() schema = pa_table.schema assert pa_table.num_rows == 2 assert schema == features.arrow_schema assert schema.metadata == features.arrow_schema.metadata assert features == Features.from_arrow_schema(schema) @pytest.mark.parametrize("writer_batch_size", [None, 1, 10]) def test_key_datatype(writer_batch_size): output = pa.BufferOutputStream() with ArrowWriter( stream=output, writer_batch_size=writer_batch_size, hash_salt="split_name", check_duplicates=True, ) as writer: with pytest.raises(InvalidKeyError): writer.write({"col_1": "foo", "col_2": 1}, key=[1, 2]) num_examples, num_bytes = writer.finalize() @pytest.mark.parametrize("writer_batch_size", [None, 2, 10]) def test_duplicate_keys(writer_batch_size): output = pa.BufferOutputStream() with ArrowWriter( stream=output, writer_batch_size=writer_batch_size, hash_salt="split_name", check_duplicates=True, ) as writer: with pytest.raises(DuplicatedKeysError): writer.write({"col_1": "foo", "col_2": 1}, key=10) writer.write({"col_1": "bar", "col_2": 2}, key=10) num_examples, num_bytes = writer.finalize() @pytest.mark.parametrize("writer_batch_size", [None, 2, 10]) def test_write_with_keys(writer_batch_size): output = pa.BufferOutputStream() with ArrowWriter( stream=output, writer_batch_size=writer_batch_size, hash_salt="split_name", check_duplicates=True, ) as writer: writer.write({"col_1": "foo", "col_2": 1}, key=1) writer.write({"col_1": "bar", "col_2": 2}, key=2) num_examples, num_bytes = writer.finalize() assert num_examples == 2 assert num_bytes > 0 _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1) @pytest.mark.parametrize("writer_batch_size", [None, 1, 10]) @pytest.mark.parametrize( "fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}] ) def test_write_batch(fields, writer_batch_size): output = pa.BufferOutputStream() schema = pa.schema(fields) if fields else None with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer: writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]}) writer.write_batch({"col_1": [], "col_2": []}) num_examples, num_bytes = writer.finalize() assert num_examples == 2 assert num_bytes > 0 if not fields: fields = {"col_1": pa.string(), "col_2": pa.int64()} assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata) _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1) 
@pytest.mark.parametrize("writer_batch_size", [None, 1, 10]) @pytest.mark.parametrize( "fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}] ) def test_write_table(fields, writer_batch_size): output = pa.BufferOutputStream() schema = pa.schema(fields) if fields else None with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer: writer.write_table(pa.Table.from_pydict({"col_1": ["foo", "bar"], "col_2": [1, 2]})) num_examples, num_bytes = writer.finalize() assert num_examples == 2 assert num_bytes > 0 if not fields: fields = {"col_1": pa.string(), "col_2": pa.int64()} assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata) _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1) @pytest.mark.parametrize("writer_batch_size", [None, 1, 10]) @pytest.mark.parametrize( "fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}] ) def test_write_row(fields, writer_batch_size): output = pa.BufferOutputStream() schema = pa.schema(fields) if fields else None with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer: writer.write_row(pa.Table.from_pydict({"col_1": ["foo"], "col_2": [1]})) writer.write_row(pa.Table.from_pydict({"col_1": ["bar"], "col_2": [2]})) num_examples, num_bytes = writer.finalize() assert num_examples == 2 assert num_bytes > 0 if not fields: fields = {"col_1": pa.string(), "col_2": pa.int64()} assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata) _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1) def test_write_file(): with tempfile.TemporaryDirectory() as tmp_dir: fields = {"col_1": pa.string(), "col_2": pa.int64()} output = os.path.join(tmp_dir, "test.arrow") with ArrowWriter(path=output, schema=pa.schema(fields)) as writer: writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]}) num_examples, num_bytes = writer.finalize() assert num_examples == 2 assert num_bytes > 0 assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata) _check_output(output, 1) def get_base_dtype(arr_type): if pa.types.is_list(arr_type): return get_base_dtype(arr_type.value_type) else: return arr_type def change_first_primitive_element_in_list(lst, value): if isinstance(lst[0], list): change_first_primitive_element_in_list(lst[0], value) else: lst[0] = value @pytest.mark.parametrize("optimized_int_type, expected_dtype", [(None, pa.int64()), (Value("int32"), pa.int32())]) @pytest.mark.parametrize("sequence", [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]]) def test_optimized_int_type_for_typed_sequence(sequence, optimized_int_type, expected_dtype): arr = pa.array(TypedSequence(sequence, optimized_int_type=optimized_int_type)) assert get_base_dtype(arr.type) == expected_dtype @pytest.mark.parametrize( "col, expected_dtype", [ ("attention_mask", pa.int8()), ("special_tokens_mask", pa.int8()), ("token_type_ids", pa.int8()), ("input_ids", pa.int32()), ("other", pa.int64()), ], ) @pytest.mark.parametrize("sequence", [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]]) def test_optimized_typed_sequence(sequence, col, expected_dtype): # in range arr = pa.array(OptimizedTypedSequence(sequence, col=col)) assert get_base_dtype(arr.type) == expected_dtype # not in range if col != "other": # avoids errors due to in-place modifications sequence = copy.deepcopy(sequence) value = 
np.iinfo(expected_dtype.to_pandas_dtype()).max + 1 change_first_primitive_element_in_list(sequence, value) arr = pa.array(OptimizedTypedSequence(sequence, col=col)) assert get_base_dtype(arr.type) == pa.int64() @pytest.mark.parametrize("raise_exception", [False, True]) def test_arrow_writer_closes_stream(raise_exception, tmp_path): path = str(tmp_path / "dataset-train.arrow") try: with ArrowWriter(path=path) as writer: if raise_exception: raise pa.lib.ArrowInvalid() else: writer.stream.close() except pa.lib.ArrowInvalid: pass finally: assert writer.stream.closed def test_arrow_writer_with_filesystem(mockfs): path = "mock://dataset-train.arrow" with ArrowWriter(path=path, storage_options=mockfs.storage_options) as writer: assert isinstance(writer._fs, type(mockfs)) assert writer._fs.storage_options == mockfs.storage_options writer.write({"col_1": "foo", "col_2": 1}) writer.write({"col_1": "bar", "col_2": 2}) num_examples, num_bytes = writer.finalize() assert num_examples == 2 assert num_bytes > 0 assert mockfs.exists(path) def test_parquet_writer_write(): output = pa.BufferOutputStream() with ParquetWriter(stream=output) as writer: writer.write({"col_1": "foo", "col_2": 1}) writer.write({"col_1": "bar", "col_2": 2}) num_examples, num_bytes = writer.finalize() assert num_examples == 2 assert num_bytes > 0 stream = pa.BufferReader(output.getvalue()) pa_table: pa.Table = pq.read_table(stream) assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]} @require_pil @pytest.mark.parametrize("embed_local_files", [False, True]) def test_writer_embed_local_files(tmp_path, embed_local_files): import PIL.Image image_path = str(tmp_path / "test_image_rgb.jpg") PIL.Image.fromarray(np.zeros((5, 5), dtype=np.uint8)).save(image_path, format="png") output = pa.BufferOutputStream() with ParquetWriter( stream=output, features=Features({"image": Image()}), embed_local_files=embed_local_files ) as writer: writer.write({"image": image_path}) writer.finalize() stream = pa.BufferReader(output.getvalue()) pa_table: pa.Table = pq.read_table(stream) out = pa_table.to_pydict() if embed_local_files: assert isinstance(out["image"][0]["path"], str) with open(image_path, "rb") as f: assert out["image"][0]["bytes"] == f.read() else: assert out["image"][0]["path"] == image_path assert out["image"][0]["bytes"] is None def test_always_nullable(): non_nullable_schema = pa.schema([pa.field("col_1", pa.string(), nullable=False)]) output = pa.BufferOutputStream() with ArrowWriter(stream=output) as writer: writer._build_writer(inferred_schema=non_nullable_schema) assert writer._schema == pa.schema([pa.field("col_1", pa.string())])
datasets/tests/test_arrow_writer.py/0
{ "file_path": "datasets/tests/test_arrow_writer.py", "repo_id": "datasets", "token_count": 6236 }
116
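A condensed version of what the write tests above exercise: streaming two examples into an in-memory Arrow buffer with `ArrowWriter` and reading the table back.

```python
import pyarrow as pa

from datasets.arrow_writer import ArrowWriter

output = pa.BufferOutputStream()
with ArrowWriter(stream=output) as writer:
    writer.write({"col_1": "foo", "col_2": 1})
    writer.write({"col_1": "bar", "col_2": 2})
    num_examples, num_bytes = writer.finalize()

table = pa.ipc.open_stream(pa.BufferReader(output.getvalue())).read_all()
print(num_examples, table.to_pydict())
# 2 {'col_1': ['foo', 'bar'], 'col_2': [1, 2]}
```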
import os import pytest import yaml from datasets.features.features import Features, Value from datasets.info import DatasetInfo, DatasetInfosDict from datasets.utils.py_utils import asdict @pytest.mark.parametrize( "files", [ ["full:README.md", "dataset_infos.json"], ["empty:README.md", "dataset_infos.json"], ["dataset_infos.json"], ["full:README.md"], ], ) def test_from_dir(files, tmp_path_factory): dataset_infos_dir = tmp_path_factory.mktemp("dset_infos_dir") if "full:README.md" in files: with open(dataset_infos_dir / "README.md", "w") as f: f.write("---\ndataset_info:\n dataset_size: 42\n---") if "empty:README.md" in files: with open(dataset_infos_dir / "README.md", "w") as f: f.write("") # we want to support dataset_infos.json for backward compatibility if "dataset_infos.json" in files: with open(dataset_infos_dir / "dataset_infos.json", "w") as f: f.write('{"default": {"dataset_size": 42}}') dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir) assert dataset_infos assert dataset_infos["default"].dataset_size == 42 @pytest.mark.parametrize( "dataset_info", [ DatasetInfo(), DatasetInfo( description="foo", features=Features({"a": Value("int32")}), builder_name="builder", config_name="config", version="1.0.0", splits=[{"name": "train"}], download_size=42, ), ], ) def test_dataset_info_dump_and_reload(tmp_path, dataset_info: DatasetInfo): tmp_path = str(tmp_path) dataset_info.write_to_directory(tmp_path) reloaded = DatasetInfo.from_directory(tmp_path) assert dataset_info == reloaded assert os.path.exists(os.path.join(tmp_path, "dataset_info.json")) def test_dataset_info_to_yaml_dict(): dataset_info = DatasetInfo( description="foo", citation="bar", homepage="https://foo.bar", license="CC0", features=Features({"a": Value("int32")}), post_processed={}, supervised_keys=(), builder_name="builder", config_name="config", version="1.0.0", splits=[{"name": "train", "num_examples": 42}], download_checksums={}, download_size=1337, post_processing_size=442, dataset_size=1234, size_in_bytes=1337 + 442 + 1234, ) dataset_info_yaml_dict = dataset_info._to_yaml_dict() assert sorted(dataset_info_yaml_dict) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML) for key in DatasetInfo._INCLUDED_INFO_IN_YAML: assert key in dataset_info_yaml_dict assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str)) dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict) reloaded = yaml.safe_load(dataset_info_yaml) assert dataset_info_yaml_dict == reloaded def test_dataset_info_to_yaml_dict_empty(): dataset_info = DatasetInfo() dataset_info_yaml_dict = dataset_info._to_yaml_dict() assert dataset_info_yaml_dict == {} @pytest.mark.parametrize( "dataset_infos_dict", [ DatasetInfosDict(), DatasetInfosDict({"default": DatasetInfo()}), DatasetInfosDict({"my_config_name": DatasetInfo()}), DatasetInfosDict( { "default": DatasetInfo( description="foo", features=Features({"a": Value("int32")}), builder_name="builder", config_name="config", version="1.0.0", splits=[{"name": "train"}], download_size=42, ) } ), DatasetInfosDict( { "v1": DatasetInfo(dataset_size=42), "v2": DatasetInfo(dataset_size=1337), } ), ], ) def test_dataset_infos_dict_dump_and_reload(tmp_path, dataset_infos_dict: DatasetInfosDict): tmp_path = str(tmp_path) dataset_infos_dict.write_to_directory(tmp_path) reloaded = DatasetInfosDict.from_directory(tmp_path) # the config_name of the dataset_infos_dict take over the attribute for config_name, dataset_info in dataset_infos_dict.items(): dataset_info.config_name = config_name # the yaml 
representation doesn't include fields like description or citation # so we just test that we can recover what we can from the yaml dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict()) assert dataset_infos_dict == reloaded if dataset_infos_dict: assert os.path.exists(os.path.join(tmp_path, "README.md")) @pytest.mark.parametrize( "dataset_info", [ None, DatasetInfo(), DatasetInfo( description="foo", features=Features({"a": Value("int32")}), builder_name="builder", config_name="config", version="1.0.0", splits=[{"name": "train"}], download_size=42, dataset_name="dataset_name", ), ], ) def test_from_merge_same_dataset_infos(dataset_info): num_elements = 3 if dataset_info is not None: dataset_info_list = [dataset_info.copy() for _ in range(num_elements)] else: dataset_info_list = [None] * num_elements dataset_info_merged = DatasetInfo.from_merge(dataset_info_list) if dataset_info is not None: assert dataset_info == dataset_info_merged else: assert DatasetInfo() == dataset_info_merged def test_dataset_info_from_dict_with_large_list(): dataset_info_dict = { "citation": "", "description": "", "features": {"col_1": {"feature": {"dtype": "int64", "_type": "Value"}, "_type": "LargeList"}}, "homepage": "", "license": "", } dataset_info = DatasetInfo.from_dict(dataset_info_dict) assert asdict(dataset_info) == dataset_info_dict
datasets/tests/test_info.py/0
{ "file_path": "datasets/tests/test_info.py", "repo_id": "datasets", "token_count": 2860 }
117
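The round trip exercised by `test_dataset_info_dump_and_reload` above can be sketched directly; `info_dir` is a hypothetical local directory.

```python
import os

from datasets.features.features import Features, Value
from datasets.info import DatasetInfo

info = DatasetInfo(description="demo", features=Features({"a": Value("int32")}))
os.makedirs("info_dir", exist_ok=True)
info.write_to_directory("info_dir")

reloaded = DatasetInfo.from_directory("info_dir")
print(reloaded == info)  # True
```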
import fnmatch import gc import os import shutil import tempfile import textwrap import time import unittest from io import BytesIO from pathlib import Path from unittest.mock import patch import numpy as np import pytest from huggingface_hub import DatasetCard, HfApi from datasets import ( Audio, ClassLabel, Dataset, DatasetDict, DownloadManager, Features, Image, IterableDatasetDict, List, Value, load_dataset, load_dataset_builder, ) from datasets.config import METADATA_CONFIGS_FIELD from datasets.data_files import get_data_patterns from datasets.exceptions import DatasetNotFoundError from datasets.packaged_modules.folder_based_builder.folder_based_builder import ( FolderBasedBuilder, FolderBasedBuilderConfig, ) from datasets.utils.file_utils import cached_path from datasets.utils.hub import hf_dataset_url from .fixtures.hub import CI_HUB_ENDPOINT, CI_HUB_USER, CI_HUB_USER_TOKEN from .utils import for_all_test_methods, require_pil, require_sndfile, require_torchcodec, xfail_if_500_502_http_error pytestmark = pytest.mark.integration @for_all_test_methods(xfail_if_500_502_http_error) @pytest.mark.usefixtures("ci_hub_config") class TestPushToHub: _api = HfApi(endpoint=CI_HUB_ENDPOINT) _token = CI_HUB_USER_TOKEN def test_push_dataset_dict_to_hub_no_token(self, temporary_repo, set_ci_hub_access_token): ds = Dataset.from_dict({"x": [1, 2, 3], "y": [4, 5, 6]}) local_ds = DatasetDict({"train": ds}) with temporary_repo() as ds_name: local_ds.push_to_hub(ds_name) hub_ds = load_dataset(ds_name, download_mode="force_redownload") assert local_ds.column_names == hub_ds.column_names assert list(local_ds["train"].features.keys()) == list(hub_ds["train"].features.keys()) assert local_ds["train"].features == hub_ds["train"].features # Ensure that there is a single file on the repository that has the correct name files = sorted(self._api.list_repo_files(ds_name, repo_type="dataset")) assert files == [".gitattributes", "README.md", "data/train-00000-of-00001.parquet"] def test_push_dataset_dict_to_hub_name_without_namespace(self, temporary_repo): ds = Dataset.from_dict({"x": [1, 2, 3], "y": [4, 5, 6]}) local_ds = DatasetDict({"train": ds}) with temporary_repo() as ds_name: local_ds.push_to_hub(ds_name.split("/")[-1], token=self._token) hub_ds = load_dataset(ds_name, download_mode="force_redownload") assert local_ds.column_names == hub_ds.column_names assert list(local_ds["train"].features.keys()) == list(hub_ds["train"].features.keys()) assert local_ds["train"].features == hub_ds["train"].features # Ensure that there is a single file on the repository that has the correct name files = sorted(self._api.list_repo_files(ds_name, repo_type="dataset")) assert files == [".gitattributes", "README.md", "data/train-00000-of-00001.parquet"] def test_push_dataset_dict_to_hub_datasets_with_different_features(self, cleanup_repo): ds_train = Dataset.from_dict({"x": [1, 2, 3], "y": [4, 5, 6]}) ds_test = Dataset.from_dict({"x": [True, False, True], "y": ["a", "b", "c"]}) local_ds = DatasetDict({"train": ds_train, "test": ds_test}) ds_name = f"{CI_HUB_USER}/test-{int(time.time() * 10e6)}" try: with pytest.raises(ValueError): local_ds.push_to_hub(ds_name.split("/")[-1], token=self._token) except AssertionError: cleanup_repo(ds_name) raise def test_push_dataset_dict_to_hub_private(self, temporary_repo): ds = Dataset.from_dict({"x": [1, 2, 3], "y": [4, 5, 6]}) local_ds = DatasetDict({"train": ds}) with temporary_repo() as ds_name: local_ds.push_to_hub(ds_name, token=self._token, private=True) hub_ds = load_dataset(ds_name, 
download_mode="force_redownload", token=self._token) assert local_ds.column_names == hub_ds.column_names assert list(local_ds["train"].features.keys()) == list(hub_ds["train"].features.keys()) assert local_ds["train"].features == hub_ds["train"].features # Ensure that there is a single file on the repository that has the correct name files = sorted(self._api.list_repo_files(ds_name, repo_type="dataset", token=self._token)) assert files == [".gitattributes", "README.md", "data/train-00000-of-00001.parquet"] def test_push_dataset_dict_to_hub(self, temporary_repo): ds = Dataset.from_dict({"x": [1, 2, 3], "y": [4, 5, 6]}) local_ds = DatasetDict({"train": ds}) with temporary_repo() as ds_name: local_ds.push_to_hub(ds_name, token=self._token) hub_ds = load_dataset(ds_name, download_mode="force_redownload") assert local_ds.column_names == hub_ds.column_names assert list(local_ds["train"].features.keys()) == list(hub_ds["train"].features.keys()) assert local_ds["train"].features == hub_ds["train"].features # Ensure that there is a single file on the repository that has the correct name files = sorted(self._api.list_repo_files(ds_name, repo_type="dataset", token=self._token)) assert files == [".gitattributes", "README.md", "data/train-00000-of-00001.parquet"] def test_push_dataset_dict_to_hub_with_pull_request(self, temporary_repo): ds = Dataset.from_dict({"x": [1, 2, 3], "y": [4, 5, 6]}) local_ds = DatasetDict({"train": ds}) with temporary_repo() as ds_name: local_ds.push_to_hub(ds_name, token=self._token, create_pr=True) hub_ds = load_dataset(ds_name, revision="refs/pr/1", download_mode="force_redownload") assert local_ds["train"].features == hub_ds["train"].features assert list(local_ds.keys()) == list(hub_ds.keys()) assert local_ds["train"].features == hub_ds["train"].features # Ensure that there is a single file on the repository that has the correct name files = sorted( self._api.list_repo_files(ds_name, revision="refs/pr/1", repo_type="dataset", token=self._token) ) assert files == [".gitattributes", "README.md", "data/train-00000-of-00001.parquet"] def test_push_dataset_dict_to_hub_with_revision(self, temporary_repo): ds = Dataset.from_dict({"x": [1, 2, 3], "y": [4, 5, 6]}) local_ds = DatasetDict({"train": ds}) with temporary_repo() as ds_name: local_ds.push_to_hub(ds_name, token=self._token, revision="dev") hub_ds = load_dataset(ds_name, revision="dev", download_mode="force_redownload") assert local_ds["train"].features == hub_ds["train"].features assert list(local_ds.keys()) == list(hub_ds.keys()) assert local_ds["train"].features == hub_ds["train"].features # Ensure that there is a single file on the repository that has the correct name files = sorted(self._api.list_repo_files(ds_name, revision="dev", repo_type="dataset", token=self._token)) assert files == [".gitattributes", "README.md", "data/train-00000-of-00001.parquet"] def test_push_dataset_dict_to_hub_multiple_files(self, temporary_repo): ds = Dataset.from_dict({"x": list(range(1000)), "y": list(range(1000))}) local_ds = DatasetDict({"train": ds}) with temporary_repo() as ds_name: with patch("datasets.config.MAX_SHARD_SIZE", "16KB"): local_ds.push_to_hub(ds_name, token=self._token) hub_ds = load_dataset(ds_name, download_mode="force_redownload") assert local_ds.column_names == hub_ds.column_names assert list(local_ds["train"].features.keys()) == list(hub_ds["train"].features.keys()) assert local_ds["train"].features == hub_ds["train"].features # Ensure that there are two files on the repository that have the correct name files = 
sorted(self._api.list_repo_files(ds_name, repo_type="dataset", token=self._token)) assert files == [ ".gitattributes", "README.md", "data/train-00000-of-00002.parquet", "data/train-00001-of-00002.parquet", ] def test_push_dataset_dict_to_hub_multiple_files_with_max_shard_size(self, temporary_repo): ds = Dataset.from_dict({"x": list(range(1000)), "y": list(range(1000))}) local_ds = DatasetDict({"train": ds}) with temporary_repo() as ds_name: local_ds.push_to_hub(ds_name, token=self._token, max_shard_size="16KB") hub_ds = load_dataset(ds_name, download_mode="force_redownload") assert local_ds.column_names == hub_ds.column_names assert list(local_ds["train"].features.keys()) == list(hub_ds["train"].features.keys()) assert local_ds["train"].features == hub_ds["train"].features # Ensure that there are two files on the repository that have the correct name files = sorted(self._api.list_repo_files(ds_name, repo_type="dataset", token=self._token)) assert files == [ ".gitattributes", "README.md", "data/train-00000-of-00002.parquet", "data/train-00001-of-00002.parquet", ] def test_push_dataset_dict_to_hub_multiple_files_with_num_shards(self, temporary_repo): ds = Dataset.from_dict({"x": list(range(1000)), "y": list(range(1000))}) local_ds = DatasetDict({"train": ds}) with temporary_repo() as ds_name: local_ds.push_to_hub(ds_name, token=self._token, num_shards={"train": 2}) hub_ds = load_dataset(ds_name, download_mode="force_redownload") assert local_ds.column_names == hub_ds.column_names assert list(local_ds["train"].features.keys()) == list(hub_ds["train"].features.keys()) assert local_ds["train"].features == hub_ds["train"].features # Ensure that there are two files on the repository that have the correct name files = sorted(self._api.list_repo_files(ds_name, repo_type="dataset", token=self._token)) assert files == [ ".gitattributes", "README.md", "data/train-00000-of-00002.parquet", "data/train-00001-of-00002.parquet", ] def test_push_dataset_dict_to_hub_with_multiple_commits(self, temporary_repo): ds = Dataset.from_dict({"x": list(range(1000)), "y": list(range(1000))}) local_ds = DatasetDict({"train": ds}) with temporary_repo() as ds_name: self._api.create_repo(ds_name, token=self._token, repo_type="dataset") num_commits_before_push = len(self._api.list_repo_commits(ds_name, repo_type="dataset", token=self._token)) with ( patch("datasets.config.MAX_SHARD_SIZE", "16KB"), patch("datasets.config.UPLOADS_MAX_NUMBER_PER_COMMIT", 1), ): local_ds.push_to_hub(ds_name, token=self._token) hub_ds = load_dataset(ds_name, download_mode="force_redownload") assert local_ds.column_names == hub_ds.column_names assert list(local_ds["train"].features.keys()) == list(hub_ds["train"].features.keys()) assert local_ds["train"].features == hub_ds["train"].features # Ensure that there are two files on the repository that have the correct name files = sorted(self._api.list_repo_files(ds_name, repo_type="dataset", token=self._token)) assert files == [ ".gitattributes", "README.md", "data/train-00000-of-00002.parquet", "data/train-00001-of-00002.parquet", ] num_commits_after_push = len(self._api.list_repo_commits(ds_name, repo_type="dataset", token=self._token)) assert num_commits_after_push - num_commits_before_push > 1 def test_push_dataset_dict_to_hub_overwrite_files(self, temporary_repo): ds = Dataset.from_dict({"x": list(range(1000)), "y": list(range(1000))}) ds2 = Dataset.from_dict({"x": list(range(100)), "y": list(range(100))}) local_ds = DatasetDict({"train": ds, "random": ds2}) # Push to hub two times, but the 
second time with a larger amount of files. # Verify that the new files contain the correct dataset. with temporary_repo() as ds_name: local_ds.push_to_hub(ds_name, token=self._token) with tempfile.TemporaryDirectory() as tmp: # Add a file starting with "data" to ensure it doesn't get deleted. path = Path(tmp) / "datafile.txt" with open(path, "w") as f: f.write("Bogus file") self._api.upload_file( path_or_fileobj=str(path), path_in_repo="datafile.txt", repo_id=ds_name, repo_type="dataset", token=self._token, ) local_ds.push_to_hub(ds_name, token=self._token, max_shard_size=500 << 5) # Ensure that there are two files on the repository that have the correct name files = sorted(self._api.list_repo_files(ds_name, repo_type="dataset", token=self._token)) assert files == [ ".gitattributes", "README.md", "data/random-00000-of-00001.parquet", "data/train-00000-of-00002.parquet", "data/train-00001-of-00002.parquet", "datafile.txt", ] self._api.delete_file("datafile.txt", repo_id=ds_name, repo_type="dataset", token=self._token) hub_ds = load_dataset(ds_name, download_mode="force_redownload") assert local_ds.column_names == hub_ds.column_names assert list(local_ds["train"].features.keys()) == list(hub_ds["train"].features.keys()) assert local_ds["train"].features == hub_ds["train"].features del hub_ds # To ensure the reference to the memory-mapped Arrow file is dropped to avoid the PermissionError on Windows gc.collect() # Push to hub two times, but the second time with fewer files. # Verify that the new files contain the correct dataset and that non-necessary files have been deleted. with temporary_repo(ds_name): local_ds.push_to_hub(ds_name, token=self._token, max_shard_size=500 << 5) with tempfile.TemporaryDirectory() as tmp: # Add a file starting with "data" to ensure it doesn't get deleted. 
path = Path(tmp) / "datafile.txt" with open(path, "w") as f: f.write("Bogus file") self._api.upload_file( path_or_fileobj=str(path), path_in_repo="datafile.txt", repo_id=ds_name, repo_type="dataset", token=self._token, ) local_ds.push_to_hub(ds_name, token=self._token) # Ensure that there are two files on the repository that have the correct name files = sorted(self._api.list_repo_files(ds_name, repo_type="dataset", token=self._token)) assert files == [ ".gitattributes", "README.md", "data/random-00000-of-00001.parquet", "data/train-00000-of-00001.parquet", "datafile.txt", ] # Keeping the "datafile.txt" breaks the load_dataset to think it's a text-based dataset self._api.delete_file("datafile.txt", repo_id=ds_name, repo_type="dataset", token=self._token) hub_ds = load_dataset(ds_name, download_mode="force_redownload") assert local_ds.column_names == hub_ds.column_names assert list(local_ds["train"].features.keys()) == list(hub_ds["train"].features.keys()) assert local_ds["train"].features == hub_ds["train"].features def test_push_dataset_to_hub(self, temporary_repo): local_ds = Dataset.from_dict({"x": [1, 2, 3], "y": [4, 5, 6]}) with temporary_repo() as ds_name: local_ds.push_to_hub(ds_name, split="train", token=self._token) local_ds_dict = {"train": local_ds} hub_ds_dict = load_dataset(ds_name, download_mode="force_redownload") assert list(local_ds_dict.keys()) == list(hub_ds_dict.keys()) for ds_split_name in local_ds_dict.keys(): local_ds = local_ds_dict[ds_split_name] hub_ds = hub_ds_dict[ds_split_name] assert local_ds.column_names == hub_ds.column_names assert list(local_ds.features.keys()) == list(hub_ds.features.keys()) assert local_ds.features == hub_ds.features def test_push_dataset_to_hub_custom_features(self, temporary_repo): features = Features({"x": Value("int64"), "y": ClassLabel(names=["neg", "pos"])}) ds = Dataset.from_dict({"x": [1, 2, 3], "y": [0, 0, 1]}, features=features) with temporary_repo() as ds_name: ds.push_to_hub(ds_name, token=self._token) hub_ds = load_dataset(ds_name, split="train", download_mode="force_redownload") assert ds.column_names == hub_ds.column_names assert list(ds.features.keys()) == list(hub_ds.features.keys()) assert ds.features == hub_ds.features assert ds[:] == hub_ds[:] @require_torchcodec @require_sndfile def test_push_dataset_to_hub_custom_features_audio(self, temporary_repo): audio_path = os.path.join(os.path.dirname(__file__), "features", "data", "test_audio_44100.wav") data = {"x": [audio_path, None], "y": [0, -1]} features = Features({"x": Audio(), "y": Value("int32")}) ds = Dataset.from_dict(data, features=features) for embed_external_files in [True, False]: with temporary_repo() as ds_name: ds.push_to_hub(ds_name, embed_external_files=embed_external_files, token=self._token) hub_ds = load_dataset(ds_name, split="train", download_mode="force_redownload") assert ds.column_names == hub_ds.column_names assert list(ds.features.keys()) == list(hub_ds.features.keys()) assert ds.features == hub_ds.features np.testing.assert_equal( ds[0]["x"].get_all_samples().data.cpu().numpy(), hub_ds[0]["x"].get_all_samples().data.cpu().numpy(), ) assert ds[1] == hub_ds[1] # don't test hub_ds[0] since audio decoding might be slightly different hub_ds = hub_ds.cast_column("x", Audio(decode=False)) elem = hub_ds[0]["x"] path, bytes_ = elem["path"], elem["bytes"] assert isinstance(path, str) assert os.path.basename(path) == "test_audio_44100.wav" assert bool(bytes_) == embed_external_files @require_pil def test_push_dataset_to_hub_custom_features_image(self, 
temporary_repo): image_path = os.path.join(os.path.dirname(__file__), "features", "data", "test_image_rgb.jpg") data = {"x": [image_path, None], "y": [0, -1]} features = Features({"x": Image(), "y": Value("int32")}) ds = Dataset.from_dict(data, features=features) for embed_external_files in [True, False]: with temporary_repo() as ds_name: ds.push_to_hub(ds_name, embed_external_files=embed_external_files, token=self._token) hub_ds = load_dataset(ds_name, split="train", download_mode="force_redownload") assert ds.column_names == hub_ds.column_names assert list(ds.features.keys()) == list(hub_ds.features.keys()) assert ds.features == hub_ds.features assert ds[:] == hub_ds[:] hub_ds = hub_ds.cast_column("x", Image(decode=False)) elem = hub_ds[0]["x"] path, bytes_ = elem["path"], elem["bytes"] assert isinstance(path, str) assert bool(bytes_) == embed_external_files @require_pil def test_push_dataset_to_hub_custom_features_image_list(self, temporary_repo): image_path = os.path.join(os.path.dirname(__file__), "features", "data", "test_image_rgb.jpg") data = {"x": [[image_path], [image_path, image_path]], "y": [0, -1]} features = Features({"x": List(Image()), "y": Value("int32")}) ds = Dataset.from_dict(data, features=features) for embed_external_files in [True, False]: with temporary_repo() as ds_name: ds.push_to_hub(ds_name, embed_external_files=embed_external_files, token=self._token) hub_ds = load_dataset(ds_name, split="train", download_mode="force_redownload") assert ds.column_names == hub_ds.column_names assert list(ds.features.keys()) == list(hub_ds.features.keys()) assert ds.features == hub_ds.features assert ds[:] == hub_ds[:] hub_ds = hub_ds.cast_column("x", List(Image(decode=False))) elem = hub_ds[0]["x"][0] path, bytes_ = elem["path"], elem["bytes"] assert isinstance(path, str) assert bool(bytes_) == embed_external_files def test_push_dataset_dict_to_hub_custom_features(self, temporary_repo): features = Features({"x": Value("int64"), "y": ClassLabel(names=["neg", "pos"])}) ds = Dataset.from_dict({"x": [1, 2, 3], "y": [0, 0, 1]}, features=features) local_ds = DatasetDict({"test": ds}) with temporary_repo() as ds_name: local_ds.push_to_hub(ds_name, token=self._token) hub_ds = load_dataset(ds_name, download_mode="force_redownload") assert local_ds.column_names == hub_ds.column_names assert list(local_ds["test"].features.keys()) == list(hub_ds["test"].features.keys()) assert local_ds["test"].features == hub_ds["test"].features def test_push_dataset_to_hub_custom_splits(self, temporary_repo): ds = Dataset.from_dict({"x": [1, 2, 3], "y": [4, 5, 6]}) with temporary_repo() as ds_name: ds.push_to_hub(ds_name, split="random", token=self._token) hub_ds = load_dataset(ds_name, download_mode="force_redownload") assert ds.column_names == hub_ds["random"].column_names assert list(ds.features.keys()) == list(hub_ds["random"].features.keys()) assert ds.features == hub_ds["random"].features def test_push_dataset_to_hub_multiple_splits_one_by_one(self, temporary_repo): ds = Dataset.from_dict({"x": [1, 2, 3], "y": [4, 5, 6]}) with temporary_repo() as ds_name: ds.push_to_hub(ds_name, split="train", token=self._token) ds.push_to_hub(ds_name, split="test", token=self._token) hub_ds = load_dataset(ds_name, download_mode="force_redownload") assert sorted(hub_ds) == ["test", "train"] assert ds.column_names == hub_ds["train"].column_names assert list(ds.features.keys()) == list(hub_ds["train"].features.keys()) assert ds.features == hub_ds["train"].features def test_push_dataset_dict_to_hub_custom_splits(self, 
temporary_repo): ds = Dataset.from_dict({"x": [1, 2, 3], "y": [4, 5, 6]}) local_ds = DatasetDict({"random": ds}) with temporary_repo() as ds_name: local_ds.push_to_hub(ds_name, token=self._token) hub_ds = load_dataset(ds_name, download_mode="force_redownload") assert local_ds.column_names == hub_ds.column_names assert list(local_ds["random"].features.keys()) == list(hub_ds["random"].features.keys()) assert local_ds["random"].features == hub_ds["random"].features @unittest.skip("This test cannot pass until iterable datasets have push to hub") def test_push_streaming_dataset_dict_to_hub(self, temporary_repo): ds = Dataset.from_dict({"x": [1, 2, 3], "y": [4, 5, 6]}) local_ds = DatasetDict({"train": ds}) with tempfile.TemporaryDirectory() as tmp: local_ds.save_to_disk(tmp) local_ds = load_dataset(tmp, streaming=True) with temporary_repo() as ds_name: local_ds.push_to_hub(ds_name, token=self._token) hub_ds = load_dataset(ds_name, download_mode="force_redownload") assert local_ds.column_names == hub_ds.column_names assert list(local_ds["train"].features.keys()) == list(hub_ds["train"].features.keys()) assert local_ds["train"].features == hub_ds["train"].features def test_push_multiple_dataset_configs_to_hub_load_dataset_builder(self, temporary_repo): ds_default = Dataset.from_dict({"a": [0], "b": [1]}) ds_config1 = Dataset.from_dict({"x": [1, 2, 3], "y": [4, 5, 6]}) ds_config2 = Dataset.from_dict({"foo": [1, 2], "bar": [4, 5]}) with temporary_repo() as ds_name: ds_default.push_to_hub(ds_name, token=self._token) ds_config1.push_to_hub(ds_name, "config1", token=self._token) ds_config2.push_to_hub(ds_name, "config2", token=self._token) ds_builder_default = load_dataset_builder(ds_name, download_mode="force_redownload") # default config assert len(ds_builder_default.BUILDER_CONFIGS) == 3 assert len(ds_builder_default.config.data_files["train"]) == 1 assert fnmatch.fnmatch( ds_builder_default.config.data_files["train"][0], "*/data/train-*", ) ds_builder_config1 = load_dataset_builder(ds_name, "config1", download_mode="force_redownload") assert len(ds_builder_config1.BUILDER_CONFIGS) == 3 assert len(ds_builder_config1.config.data_files["train"]) == 1 assert fnmatch.fnmatch( ds_builder_config1.config.data_files["train"][0], "*/config1/train-*", ) ds_builder_config2 = load_dataset_builder(ds_name, "config2", download_mode="force_redownload") assert len(ds_builder_config2.BUILDER_CONFIGS) == 3 assert len(ds_builder_config2.config.data_files["train"]) == 1 assert fnmatch.fnmatch( ds_builder_config2.config.data_files["train"][0], "*/config2/train-*", ) with pytest.raises(ValueError): # no config 'config3' load_dataset_builder(ds_name, "config3", download_mode="force_redownload") def test_push_multiple_dataset_configs_to_hub_load_dataset(self, temporary_repo): ds_default = Dataset.from_dict({"a": [0], "b": [1]}) ds_config1 = Dataset.from_dict({"x": [1, 2, 3], "y": [4, 5, 6]}) ds_config2 = Dataset.from_dict({"foo": [1, 2], "bar": [4, 5]}) with temporary_repo() as ds_name: ds_default.push_to_hub(ds_name, token=self._token) ds_config1.push_to_hub(ds_name, "config1", token=self._token) ds_config2.push_to_hub(ds_name, "config2", token=self._token) files = sorted(self._api.list_repo_files(ds_name, repo_type="dataset")) assert files == [ ".gitattributes", "README.md", "config1/train-00000-of-00001.parquet", "config2/train-00000-of-00001.parquet", "data/train-00000-of-00001.parquet", ] hub_ds_default = load_dataset(ds_name, download_mode="force_redownload") hub_ds_config1 = load_dataset(ds_name, "config1", 
download_mode="force_redownload") hub_ds_config2 = load_dataset(ds_name, "config2", download_mode="force_redownload") # only "train" split assert len(hub_ds_default) == len(hub_ds_config1) == len(hub_ds_config2) == 1 assert ds_default.column_names == hub_ds_default["train"].column_names == ["a", "b"] assert ds_config1.column_names == hub_ds_config1["train"].column_names == ["x", "y"] assert ds_config2.column_names == hub_ds_config2["train"].column_names == ["foo", "bar"] assert ds_default.features == hub_ds_default["train"].features assert ds_config1.features == hub_ds_config1["train"].features assert ds_config2.features == hub_ds_config2["train"].features assert ds_default.num_rows == hub_ds_default["train"].num_rows == 1 assert ds_config1.num_rows == hub_ds_config1["train"].num_rows == 3 assert ds_config2.num_rows == hub_ds_config2["train"].num_rows == 2 with pytest.raises(ValueError): # no config 'config3' load_dataset(ds_name, "config3", download_mode="force_redownload") @pytest.mark.parametrize("specific_default_config_name", [False, True]) def test_push_multiple_dataset_configs_to_hub_readme_metadata_content( self, specific_default_config_name, temporary_repo ): ds_default = Dataset.from_dict({"a": [0], "b": [2]}) ds_config1 = Dataset.from_dict({"x": [1, 2, 3], "y": [4, 5, 6]}) ds_config2 = Dataset.from_dict({"foo": [1, 2], "bar": [4, 5]}) with temporary_repo() as ds_name: if specific_default_config_name: ds_default.push_to_hub(ds_name, config_name="config0", set_default=True, token=self._token) else: ds_default.push_to_hub(ds_name, token=self._token) ds_config1.push_to_hub(ds_name, "config1", token=self._token) ds_config2.push_to_hub(ds_name, "config2", token=self._token) # check that configs args was correctly pushed to README.md ds_readme_path = cached_path(hf_dataset_url(ds_name, "README.md")) dataset_card_data = DatasetCard.load(ds_readme_path).data assert METADATA_CONFIGS_FIELD in dataset_card_data assert isinstance(dataset_card_data[METADATA_CONFIGS_FIELD], list) assert sorted(dataset_card_data[METADATA_CONFIGS_FIELD], key=lambda x: x["config_name"]) == ( [ { "config_name": "config0", "data_files": [ {"split": "train", "path": "config0/train-*"}, ], "default": True, }, ] if specific_default_config_name else [] ) + [ { "config_name": "config1", "data_files": [ {"split": "train", "path": "config1/train-*"}, ], }, { "config_name": "config2", "data_files": [ {"split": "train", "path": "config2/train-*"}, ], }, ] + ( [] if specific_default_config_name else [ { "config_name": "default", "data_files": [ {"split": "train", "path": "data/train-*"}, ], }, ] ) def test_push_multiple_dataset_dict_configs_to_hub_load_dataset_builder(self, temporary_repo): ds_default = Dataset.from_dict({"a": [0], "b": [1]}) ds_config1 = Dataset.from_dict({"x": [1, 2, 3], "y": [4, 5, 6]}) ds_config2 = Dataset.from_dict({"foo": [1, 2], "bar": [4, 5]}) ds_default = DatasetDict({"random": ds_default}) ds_config1 = DatasetDict({"random": ds_config1}) ds_config2 = DatasetDict({"random": ds_config2}) with temporary_repo() as ds_name: ds_default.push_to_hub(ds_name, token=self._token) ds_config1.push_to_hub(ds_name, "config1", token=self._token) ds_config2.push_to_hub(ds_name, "config2", token=self._token) ds_builder_default = load_dataset_builder(ds_name, download_mode="force_redownload") # default config assert len(ds_builder_default.BUILDER_CONFIGS) == 3 assert len(ds_builder_default.config.data_files["random"]) == 1 assert fnmatch.fnmatch( ds_builder_default.config.data_files["random"][0], "*/data/random-*", ) 
ds_builder_config1 = load_dataset_builder(ds_name, "config1", download_mode="force_redownload") assert len(ds_builder_config1.BUILDER_CONFIGS) == 3 assert len(ds_builder_config1.config.data_files["random"]) == 1 assert fnmatch.fnmatch( ds_builder_config1.config.data_files["random"][0], "*/config1/random-*", ) ds_builder_config2 = load_dataset_builder(ds_name, "config2", download_mode="force_redownload") assert len(ds_builder_config2.BUILDER_CONFIGS) == 3 assert len(ds_builder_config2.config.data_files["random"]) == 1 assert fnmatch.fnmatch( ds_builder_config2.config.data_files["random"][0], "*/config2/random-*", ) with pytest.raises(ValueError): # no config named 'config3' load_dataset_builder(ds_name, "config3", download_mode="force_redownload") def test_push_multiple_dataset_dict_configs_to_hub_load_dataset(self, temporary_repo): ds_default = Dataset.from_dict({"a": [0], "b": [1]}) ds_config1 = Dataset.from_dict({"x": [1, 2, 3], "y": [4, 5, 6]}) ds_config2 = Dataset.from_dict({"foo": [1, 2], "bar": [4, 5]}) ds_default = DatasetDict({"train": ds_default, "random": ds_default}) ds_config1 = DatasetDict({"train": ds_config1, "random": ds_config1}) ds_config2 = DatasetDict({"train": ds_config2, "random": ds_config2}) with temporary_repo() as ds_name: ds_default.push_to_hub(ds_name, token=self._token) ds_config1.push_to_hub(ds_name, "config1", token=self._token) ds_config2.push_to_hub(ds_name, "config2", token=self._token) files = sorted(self._api.list_repo_files(ds_name, repo_type="dataset")) assert files == [ ".gitattributes", "README.md", "config1/random-00000-of-00001.parquet", "config1/train-00000-of-00001.parquet", "config2/random-00000-of-00001.parquet", "config2/train-00000-of-00001.parquet", "data/random-00000-of-00001.parquet", "data/train-00000-of-00001.parquet", ] hub_ds_default = load_dataset(ds_name, download_mode="force_redownload") hub_ds_config1 = load_dataset(ds_name, "config1", download_mode="force_redownload") hub_ds_config2 = load_dataset(ds_name, "config2", download_mode="force_redownload") # two splits expected_splits = ["random", "train"] assert len(hub_ds_default) == len(hub_ds_config1) == len(hub_ds_config2) == 2 assert sorted(hub_ds_default) == sorted(hub_ds_config1) == sorted(hub_ds_config2) == expected_splits for split in expected_splits: assert ds_default[split].column_names == hub_ds_default[split].column_names == ["a", "b"] assert ds_config1[split].column_names == hub_ds_config1[split].column_names == ["x", "y"] assert ds_config2[split].column_names == hub_ds_config2[split].column_names == ["foo", "bar"] assert ds_default[split].features == hub_ds_default[split].features assert ds_config1[split].features == hub_ds_config1[split].features assert ds_config2[split].features == hub_ds_config2["train"].features assert ds_default[split].num_rows == hub_ds_default[split].num_rows == 1 assert ds_config1[split].num_rows == hub_ds_config1[split].num_rows == 3 assert ds_config2[split].num_rows == hub_ds_config2[split].num_rows == 2 with pytest.raises(ValueError): # no config 'config3' load_dataset(ds_name, "config3", download_mode="force_redownload") @pytest.mark.parametrize("specific_default_config_name", [False, True]) def test_push_multiple_dataset_dict_configs_to_hub_readme_metadata_content( self, specific_default_config_name, temporary_repo ): ds_default = Dataset.from_dict({"a": [0], "b": [1]}) ds_config1 = Dataset.from_dict({"x": [1, 2, 3], "y": [4, 5, 6]}) ds_config2 = Dataset.from_dict({"foo": [1, 2], "bar": [4, 5]}) ds_default = DatasetDict({"train": ds_default, 
"random": ds_default}) ds_config1 = DatasetDict({"train": ds_config1, "random": ds_config1}) ds_config2 = DatasetDict({"train": ds_config2, "random": ds_config2}) with temporary_repo() as ds_name: if specific_default_config_name: ds_default.push_to_hub(ds_name, config_name="config0", set_default=True, token=self._token) else: ds_default.push_to_hub(ds_name, token=self._token) ds_config1.push_to_hub(ds_name, "config1", token=self._token) ds_config2.push_to_hub(ds_name, "config2", token=self._token) # check that configs args was correctly pushed to README.md ds_readme_path = cached_path(hf_dataset_url(ds_name, "README.md")) dataset_card_data = DatasetCard.load(ds_readme_path).data assert METADATA_CONFIGS_FIELD in dataset_card_data assert isinstance(dataset_card_data[METADATA_CONFIGS_FIELD], list) assert sorted(dataset_card_data[METADATA_CONFIGS_FIELD], key=lambda x: x["config_name"]) == ( [ { "config_name": "config0", "data_files": [ {"split": "train", "path": "config0/train-*"}, {"split": "random", "path": "config0/random-*"}, ], "default": True, }, ] if specific_default_config_name else [] ) + [ { "config_name": "config1", "data_files": [ {"split": "train", "path": "config1/train-*"}, {"split": "random", "path": "config1/random-*"}, ], }, { "config_name": "config2", "data_files": [ {"split": "train", "path": "config2/train-*"}, {"split": "random", "path": "config2/random-*"}, ], }, ] + ( [] if specific_default_config_name else [ { "config_name": "default", "data_files": [ {"split": "train", "path": "data/train-*"}, {"split": "random", "path": "data/random-*"}, ], }, ] ) def test_push_dataset_to_hub_with_config_no_metadata_configs(self, temporary_repo): ds = Dataset.from_dict({"x": [1, 2, 3], "y": [4, 5, 6]}) ds_another_config = Dataset.from_dict({"foo": [1, 2], "bar": [4, 5]}) parquet_buf = BytesIO() ds.to_parquet(parquet_buf) parquet_content = parquet_buf.getvalue() with temporary_repo() as ds_name: self._api.create_repo(ds_name, token=self._token, repo_type="dataset") # old push_to_hub was uploading the parquet files only - without metadata configs self._api.upload_file( path_or_fileobj=parquet_content, path_in_repo="data/train-00000-of-00001.parquet", repo_id=ds_name, repo_type="dataset", token=self._token, ) ds_another_config.push_to_hub(ds_name, "another_config", token=self._token) ds_builder = load_dataset_builder(ds_name, download_mode="force_redownload") assert len(ds_builder.config.data_files) == 1 assert len(ds_builder.config.data_files["train"]) == 1 assert fnmatch.fnmatch(ds_builder.config.data_files["train"][0], "*/data/train-00000-of-00001.parquet") ds_another_config_builder = load_dataset_builder( ds_name, "another_config", download_mode="force_redownload" ) assert len(ds_another_config_builder.config.data_files) == 1 assert len(ds_another_config_builder.config.data_files["train"]) == 1 assert fnmatch.fnmatch( ds_another_config_builder.config.data_files["train"][0], "*/another_config/train-00000-of-00001.parquet", ) def test_push_dataset_dict_to_hub_with_config_no_metadata_configs(self, temporary_repo): ds = Dataset.from_dict({"x": [1, 2, 3], "y": [4, 5, 6]}) ds_another_config = Dataset.from_dict({"foo": [1, 2], "bar": [4, 5]}) parquet_buf = BytesIO() ds.to_parquet(parquet_buf) parquet_content = parquet_buf.getvalue() local_ds_another_config = DatasetDict({"random": ds_another_config}) with temporary_repo() as ds_name: self._api.create_repo(ds_name, token=self._token, repo_type="dataset") # old push_to_hub was uploading the parquet files only - without metadata configs 
self._api.upload_file( path_or_fileobj=parquet_content, path_in_repo="data/random-00000-of-00001.parquet", repo_id=ds_name, repo_type="dataset", token=self._token, ) local_ds_another_config.push_to_hub(ds_name, "another_config", token=self._token) ds_builder = load_dataset_builder(ds_name, download_mode="force_redownload") assert len(ds_builder.config.data_files) == 1 assert len(ds_builder.config.data_files["random"]) == 1 assert fnmatch.fnmatch(ds_builder.config.data_files["random"][0], "*/data/random-00000-of-00001.parquet") ds_another_config_builder = load_dataset_builder( ds_name, "another_config", download_mode="force_redownload" ) assert len(ds_another_config_builder.config.data_files) == 1 assert len(ds_another_config_builder.config.data_files["random"]) == 1 assert fnmatch.fnmatch( ds_another_config_builder.config.data_files["random"][0], "*/another_config/random-00000-of-00001.parquet", ) def test_push_dataset_dict_to_hub_num_proc(self, temporary_repo, set_ci_hub_access_token): ds = Dataset.from_dict({"x": [1, 2, 3], "y": [4, 5, 6]}) local_ds = DatasetDict({"train": ds}) with temporary_repo() as ds_name: local_ds.push_to_hub(ds_name, num_proc=2) hub_ds = load_dataset(ds_name, download_mode="force_redownload") assert local_ds.column_names == hub_ds.column_names assert list(local_ds["train"].features.keys()) == list(hub_ds["train"].features.keys()) assert local_ds["train"].features == hub_ds["train"].features # Ensure that there is a single file on the repository that has the correct name files = sorted(self._api.list_repo_files(ds_name, repo_type="dataset")) assert files == [ ".gitattributes", "README.md", "data/train-00000-of-00002.parquet", "data/train-00001-of-00002.parquet", ] def test_push_dataset_dict_to_hub_iterable(self, temporary_repo, set_ci_hub_access_token): ds = Dataset.from_dict({"x": [1, 2, 3], "y": [4, 5, 6]}).to_iterable_dataset() local_ds = IterableDatasetDict({"train": ds}) with temporary_repo() as ds_name: local_ds.push_to_hub(ds_name) hub_ds = load_dataset(ds_name, download_mode="force_redownload") assert local_ds.column_names == hub_ds.column_names assert list(local_ds["train"].features.keys()) == list(hub_ds["train"].features.keys()) assert local_ds["train"].features == hub_ds["train"].features # Ensure that there is a single file on the repository that has the correct name files = sorted(self._api.list_repo_files(ds_name, repo_type="dataset")) assert files == [".gitattributes", "README.md", "data/train-00000-of-00001.parquet"] def test_push_dataset_dict_to_hub_iterable_num_proc(self, temporary_repo, set_ci_hub_access_token): ds = Dataset.from_dict({"x": [1, 2, 3], "y": [4, 5, 6]}).to_iterable_dataset(num_shards=3) local_ds = IterableDatasetDict({"train": ds}) with temporary_repo() as ds_name: local_ds.push_to_hub(ds_name, num_proc=2) hub_ds = load_dataset(ds_name, download_mode="force_redownload") assert local_ds.column_names == hub_ds.column_names assert list(local_ds["train"].features.keys()) == list(hub_ds["train"].features.keys()) assert local_ds["train"].features == hub_ds["train"].features # Ensure that there is a single file on the repository that has the correct name files = sorted(self._api.list_repo_files(ds_name, repo_type="dataset")) assert files == [ ".gitattributes", "README.md", "data/train-00000-of-00003.parquet", "data/train-00001-of-00003.parquet", "data/train-00002-of-00003.parquet", ] class DummyFolderBasedBuilder(FolderBasedBuilder): BASE_FEATURE = dict BASE_COLUMN_NAME = "base" BUILDER_CONFIG_CLASS = FolderBasedBuilderConfig EXTENSIONS 
= [".txt"] # CLASSIFICATION_TASK = TextClassification(text_column="base", label_column="label") @pytest.fixture(params=[".jsonl", ".csv"]) def text_file_with_metadata(request, tmp_path, text_file): metadata_filename_extension = request.param data_dir = tmp_path / "data_dir" data_dir.mkdir() text_file_path = data_dir / "file.txt" shutil.copyfile(text_file, text_file_path) metadata_file_path = data_dir / f"metadata{metadata_filename_extension}" metadata = textwrap.dedent( """\ {"file_name": "file.txt", "additional_feature": "Dummy file"} """ if metadata_filename_extension == ".jsonl" else """\ file_name,additional_feature file.txt,Dummy file """ ) with open(metadata_file_path, "w", encoding="utf-8") as f: f.write(metadata) return text_file_path, metadata_file_path @for_all_test_methods(xfail_if_500_502_http_error) @pytest.mark.usefixtures("ci_hub_config") class TestLoadFromHub: _api = HfApi(endpoint=CI_HUB_ENDPOINT) _token = CI_HUB_USER_TOKEN def test_load_dataset_with_metadata_file(self, temporary_repo, text_file_with_metadata, tmp_path): text_file_path, metadata_file_path = text_file_with_metadata data_dir_path = text_file_path.parent cache_dir_path = tmp_path / ".cache" cache_dir_path.mkdir() with temporary_repo() as repo_id: self._api.create_repo(repo_id, token=self._token, repo_type="dataset") self._api.upload_folder( folder_path=str(data_dir_path), repo_id=repo_id, repo_type="dataset", token=self._token, ) data_files = [ f"hf://datasets/{repo_id}/{text_file_path.name}", f"hf://datasets/{repo_id}/{metadata_file_path.name}", ] builder = DummyFolderBasedBuilder( dataset_name=repo_id.split("/")[-1], data_files=data_files, cache_dir=str(cache_dir_path) ) download_manager = DownloadManager() gen_kwargs = builder._split_generators(download_manager)[0].gen_kwargs generator = builder._generate_examples(**gen_kwargs) result = [example for _, example in generator] assert len(result) == 1 def test_get_data_patterns(self, temporary_repo, tmp_path): repo_dir = tmp_path / "test_get_data_patterns" data_dir = repo_dir / "data" data_dir.mkdir(parents=True) data_file = data_dir / "train-00001-of-00009.parquet" data_file.touch() with temporary_repo() as repo_id: self._api.create_repo(repo_id, token=self._token, repo_type="dataset") self._api.upload_folder( folder_path=str(repo_dir), repo_id=repo_id, repo_type="dataset", token=self._token, ) data_file_patterns = get_data_patterns(f"hf://datasets/{repo_id}") assert data_file_patterns == { "train": ["data/train-[0-9][0-9][0-9][0-9][0-9]-of-[0-9][0-9][0-9][0-9][0-9]*.*"] } @pytest.mark.parametrize("dataset", ["gated", "private"]) def test_load_dataset_raises_for_unauthenticated_user( self, dataset, hf_gated_dataset_repo_txt_data, hf_private_dataset_repo_txt_data ): dataset_ids = { "gated": hf_gated_dataset_repo_txt_data, "private": hf_private_dataset_repo_txt_data, } dataset_id = dataset_ids[dataset] with pytest.raises(DatasetNotFoundError): _ = load_dataset(dataset_id, token=False)
datasets/tests/test_upstream_hub.py/0
{ "file_path": "datasets/tests/test_upstream_hub.py", "repo_id": "datasets", "token_count": 24619 }
118
<!--Copyright 2025 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# PEFT

Diffusers supports loading adapters such as [LoRA](../../using-diffusers/loading_adapters) from the [PEFT](https://huggingface.co/docs/peft/index) library through the [`~loaders.peft.PeftAdapterMixin`] class. This allows modeling classes in Diffusers, such as [`UNet2DConditionModel`] and [`SD3Transformer2DModel`], to operate with an adapter.

<Tip>

Refer to the [Inference with PEFT](../../tutorials/using_peft_for_inference.md) tutorial for an overview of how to use PEFT in Diffusers for inference.

</Tip>

## PeftAdapterMixin

[[autodoc]] loaders.peft.PeftAdapterMixin
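Since this page only lists the mixin's autodoc entries, a minimal sketch of how an adapter actually gets attached may help. The SDXL base checkpoint and the `ostris/ikea-instructions-lora-sdxl` LoRA below are just example repositories, not part of this page; any pipeline whose denoiser subclasses `PeftAdapterMixin` should behave the same way.

```python
import torch
from diffusers import DiffusionPipeline

# Load a pipeline whose UNet inherits from PeftAdapterMixin.
pipe = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
).to("cuda")

# load_lora_weights routes through the PEFT backend and registers the adapter
# on the underlying model under the given name.
pipe.load_lora_weights("ostris/ikea-instructions-lora-sdxl", adapter_name="ikea")

# Adapters can be re-weighted or toggled at runtime.
pipe.set_adapters("ikea", adapter_weights=0.8)
image = pipe("a chair, ikea instructions", num_inference_steps=30).images[0]

pipe.disable_lora()  # temporarily turn the adapter off without unloading it
```

`set_adapters` also accepts a list of adapter names and per-adapter weights, which is how multiple LoRAs are blended on a single model.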
diffusers/docs/source/en/api/loaders/peft.md/0
{ "file_path": "diffusers/docs/source/en/api/loaders/peft.md", "repo_id": "diffusers", "token_count": 332 }
119
<!-- Copyright 2025 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License. -->

# AutoencoderKLAllegro

The 3D variational autoencoder (VAE) model with KL loss used in [Allegro](https://github.com/rhymes-ai/Allegro) was introduced in [Allegro: Open the Black Box of Commercial-Level Video Generation Model](https://huggingface.co/papers/2410.15458) by RhymesAI.

The model can be loaded with the following code snippet.

```python
import torch
from diffusers import AutoencoderKLAllegro

vae = AutoencoderKLAllegro.from_pretrained("rhymes-ai/Allegro", subfolder="vae", torch_dtype=torch.float32).to("cuda")
```

## AutoencoderKLAllegro

[[autodoc]] AutoencoderKLAllegro
    - decode
    - encode
    - all

## AutoencoderKLOutput

[[autodoc]] models.autoencoders.autoencoder_kl.AutoencoderKLOutput

## DecoderOutput

[[autodoc]] models.autoencoders.vae.DecoderOutput
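Beyond loading, a short round-trip can make the `encode`/`decode` entries above more concrete. The sketch below feeds a random tensor in an illustrative `(batch, channels, frames, height, width)` layout rather than a real video; the frame count and resolution are assumptions, not requirements of the model.

```python
import torch
from diffusers import AutoencoderKLAllegro

vae = AutoencoderKLAllegro.from_pretrained(
    "rhymes-ai/Allegro", subfolder="vae", torch_dtype=torch.float32
).to("cuda")
vae.enable_tiling()  # optional: lowers the memory peak on large frames

# Dummy video batch in (batch, channels, frames, height, width) layout.
video = torch.randn(1, 3, 24, 256, 256, device="cuda")

with torch.no_grad():
    latents = vae.encode(video).latent_dist.sample()  # AutoencoderKLOutput -> latents
    reconstruction = vae.decode(latents).sample       # DecoderOutput -> video tensor
```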
diffusers/docs/source/en/api/models/autoencoderkl_allegro.md/0
{ "file_path": "diffusers/docs/source/en/api/models/autoencoderkl_allegro.md", "repo_id": "diffusers", "token_count": 427 }
120
<!-- Copyright 2025 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License. -->

# MochiTransformer3DModel

A Diffusion Transformer model for 3D video-like data was introduced in [Mochi-1 Preview](https://huggingface.co/genmo/mochi-1-preview) by Genmo.

The model can be loaded with the following code snippet.

```python
import torch
from diffusers import MochiTransformer3DModel

transformer = MochiTransformer3DModel.from_pretrained("genmo/mochi-1-preview", subfolder="transformer", torch_dtype=torch.float16).to("cuda")
```

## MochiTransformer3DModel

[[autodoc]] MochiTransformer3DModel

## Transformer2DModelOutput

[[autodoc]] models.modeling_outputs.Transformer2DModelOutput
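The transformer is rarely used on its own; it is typically swapped into `MochiPipeline`. The wiring below is an illustrative sketch (the prompt, frame count, and bfloat16 dtype are arbitrary choices), with the same memory helpers used on the Mochi pipeline page.

```python
import torch
from diffusers import MochiPipeline, MochiTransformer3DModel
from diffusers.utils import export_to_video

transformer = MochiTransformer3DModel.from_pretrained(
    "genmo/mochi-1-preview", subfolder="transformer", torch_dtype=torch.bfloat16
)

# Reuse the separately loaded transformer when building the pipeline.
pipe = MochiPipeline.from_pretrained(
    "genmo/mochi-1-preview", transformer=transformer, torch_dtype=torch.bfloat16
)
pipe.enable_model_cpu_offload()
pipe.enable_vae_tiling()

frames = pipe(
    "A corgi running along a beach at sunset.",
    num_frames=85,
    num_inference_steps=50,
).frames[0]
export_to_video(frames, "mochi_sample.mp4", fps=30)
```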
diffusers/docs/source/en/api/models/mochi_transformer3d.md/0
{ "file_path": "diffusers/docs/source/en/api/models/mochi_transformer3d.md", "repo_id": "diffusers", "token_count": 340 }
121
<!-- Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-->

# HiDreamImage

[HiDream-I1](https://huggingface.co/HiDream-ai) by HiDream.ai

<Tip>

[Caching](../../optimization/cache) may also speed up inference by storing and reusing intermediate outputs.

</Tip>

## Available models

The following models are available for the [`HiDreamImagePipeline`](text-to-image) pipeline:

| Model name | Description |
|:---|:---|
| [`HiDream-ai/HiDream-I1-Full`](https://huggingface.co/HiDream-ai/HiDream-I1-Full) | - |
| [`HiDream-ai/HiDream-I1-Dev`](https://huggingface.co/HiDream-ai/HiDream-I1-Dev) | - |
| [`HiDream-ai/HiDream-I1-Fast`](https://huggingface.co/HiDream-ai/HiDream-I1-Fast) | - |

## HiDreamImagePipeline

[[autodoc]] HiDreamImagePipeline
  - all
  - __call__

## HiDreamImagePipelineOutput

[[autodoc]] pipelines.hidream_image.pipeline_output.HiDreamImagePipelineOutput
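Because the table above only lists checkpoints, a text-to-image sketch may be useful. This is an illustrative assumption-laden example, not the page's official recipe: it follows the common pattern of loading the fourth (Llama-based) text encoder separately, and the Llama repository name, prompt, and sampling settings are all placeholder choices (the `-Dev` and `-Fast` variants typically use fewer steps).

```python
import torch
from transformers import LlamaForCausalLM, PreTrainedTokenizerFast
from diffusers import HiDreamImagePipeline

# Assumed Llama 3.1 instruct checkpoint for the fourth text encoder (gated repo).
llama_repo = "meta-llama/Meta-Llama-3.1-8B-Instruct"
tokenizer_4 = PreTrainedTokenizerFast.from_pretrained(llama_repo)
text_encoder_4 = LlamaForCausalLM.from_pretrained(
    llama_repo,
    output_hidden_states=True,
    output_attentions=True,
    torch_dtype=torch.bfloat16,
)

pipe = HiDreamImagePipeline.from_pretrained(
    "HiDream-ai/HiDream-I1-Full",
    tokenizer_4=tokenizer_4,
    text_encoder_4=text_encoder_4,
    torch_dtype=torch.bfloat16,
).to("cuda")

image = pipe(
    "A cat holding a sign that says 'hello world'",
    height=1024,
    width=1024,
    guidance_scale=5.0,
    num_inference_steps=50,
    generator=torch.Generator("cuda").manual_seed(0),
).images[0]
image.save("hidream_output.png")
```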
diffusers/docs/source/en/api/pipelines/hidream.md/0
{ "file_path": "diffusers/docs/source/en/api/pipelines/hidream.md", "repo_id": "diffusers", "token_count": 474 }
122
<!-- Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. --> # Mochi 1 Preview <div class="flex flex-wrap space-x-1"> <img alt="LoRA" src="https://img.shields.io/badge/LoRA-d8b4fe?style=flat"/> </div> > [!TIP] > Only a research preview of the model weights is available at the moment. [Mochi 1](https://huggingface.co/genmo/mochi-1-preview) is a video generation model by Genmo with a strong focus on prompt adherence and motion quality. The model features a 10B parameter Asmmetric Diffusion Transformer (AsymmDiT) architecture, and uses non-square QKV and output projection layers to reduce inference memory requirements. A single T5-XXL model is used to encode prompts. *Mochi 1 preview is an open state-of-the-art video generation model with high-fidelity motion and strong prompt adherence in preliminary evaluation. This model dramatically closes the gap between closed and open video generation systems. The model is released under a permissive Apache 2.0 license.* > [!TIP] > Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-a-pipeline) section to learn how to efficiently load the same components into multiple pipelines. ## Quantization Quantization helps reduce the memory requirements of very large models by storing model weights in a lower precision data type. However, quantization may have varying impact on video quality depending on the video model. Refer to the [Quantization](../../quantization/overview) overview to learn more about supported quantization backends and selecting a quantization backend that supports your use case. The example below demonstrates how to load a quantized [`MochiPipeline`] for inference with bitsandbytes. ```py import torch from diffusers import BitsAndBytesConfig as DiffusersBitsAndBytesConfig, MochiTransformer3DModel, MochiPipeline from diffusers.utils import export_to_video from transformers import BitsAndBytesConfig as BitsAndBytesConfig, T5EncoderModel quant_config = BitsAndBytesConfig(load_in_8bit=True) text_encoder_8bit = T5EncoderModel.from_pretrained( "genmo/mochi-1-preview", subfolder="text_encoder", quantization_config=quant_config, torch_dtype=torch.float16, ) quant_config = DiffusersBitsAndBytesConfig(load_in_8bit=True) transformer_8bit = MochiTransformer3DModel.from_pretrained( "genmo/mochi-1-preview", subfolder="transformer", quantization_config=quant_config, torch_dtype=torch.float16, ) pipeline = MochiPipeline.from_pretrained( "genmo/mochi-1-preview", text_encoder=text_encoder_8bit, transformer=transformer_8bit, torch_dtype=torch.float16, device_map="balanced", ) video = pipeline( "Close-up of a cats eye, with the galaxy reflected in the cats eye. 
Ultra high resolution 4k.", num_inference_steps=28, guidance_scale=3.5 ).frames[0] export_to_video(video, "cat.mp4") ``` ## Generating videos with Mochi-1 Preview The following example will download the full precision `mochi-1-preview` weights and produce the highest quality results but will require at least 42GB VRAM to run. ```python import torch from diffusers import MochiPipeline from diffusers.utils import export_to_video pipe = MochiPipeline.from_pretrained("genmo/mochi-1-preview") # Enable memory savings pipe.enable_model_cpu_offload() pipe.enable_vae_tiling() prompt = "Close-up of a chameleon's eye, with its scaly skin changing color. Ultra high resolution 4k." with torch.autocast("cuda", torch.bfloat16, cache_enabled=False): frames = pipe(prompt, num_frames=85).frames[0] export_to_video(frames, "mochi.mp4", fps=30) ``` ## Using a lower precision variant to save memory The following example will use the `bfloat16` variant of the model and requires 22GB VRAM to run. There is a slight drop in the quality of the generated video as a result. ```python import torch from diffusers import MochiPipeline from diffusers.utils import export_to_video pipe = MochiPipeline.from_pretrained("genmo/mochi-1-preview", variant="bf16", torch_dtype=torch.bfloat16) # Enable memory savings pipe.enable_model_cpu_offload() pipe.enable_vae_tiling() prompt = "Close-up of a chameleon's eye, with its scaly skin changing color. Ultra high resolution 4k." frames = pipe(prompt, num_frames=85).frames[0] export_to_video(frames, "mochi.mp4", fps=30) ``` ## Reproducing the results from the Genmo Mochi repo The [Genmo Mochi implementation](https://github.com/genmoai/mochi/tree/main) uses different precision values for each stage in the inference process. The text encoder and VAE use `torch.float32`, while the DiT uses `torch.bfloat16` with the [attention kernel](https://pytorch.org/docs/stable/generated/torch.nn.attention.sdpa_kernel.html#torch.nn.attention.sdpa_kernel) set to `EFFICIENT_ATTENTION`. Diffusers pipelines currently do not support setting different `dtypes` for different stages of the pipeline. In order to run inference in the same way as the original implementation, please refer to the following example. <Tip> The original Mochi implementation zeros out empty prompts. However, enabling this option and placing the entire pipeline under autocast can lead to numerical overflows with the T5 text encoder. When enabling `force_zeros_for_empty_prompt`, it is recommended to run the text encoding step outside the autocast context in full precision. </Tip> <Tip> Decoding the latents in full precision is very memory intensive. You will need at least 70GB VRAM to generate the 163 frames in this example. To reduce memory, either reduce the number of frames or run the decoding step in `torch.bfloat16`. </Tip> ```python import torch from torch.nn.attention import SDPBackend, sdpa_kernel from diffusers import MochiPipeline from diffusers.utils import export_to_video from diffusers.video_processor import VideoProcessor pipe = MochiPipeline.from_pretrained("genmo/mochi-1-preview", force_zeros_for_empty_prompt=True) pipe.enable_vae_tiling() pipe.enable_model_cpu_offload() prompt = "An aerial shot of a parade of elephants walking across the African savannah. The camera showcases the herd and the surrounding landscape." 
with torch.no_grad(): prompt_embeds, prompt_attention_mask, negative_prompt_embeds, negative_prompt_attention_mask = ( pipe.encode_prompt(prompt=prompt) ) with torch.autocast("cuda", torch.bfloat16): with sdpa_kernel(SDPBackend.EFFICIENT_ATTENTION): frames = pipe( prompt_embeds=prompt_embeds, prompt_attention_mask=prompt_attention_mask, negative_prompt_embeds=negative_prompt_embeds, negative_prompt_attention_mask=negative_prompt_attention_mask, guidance_scale=4.5, num_inference_steps=64, height=480, width=848, num_frames=163, generator=torch.Generator("cuda").manual_seed(0), output_type="latent", return_dict=False, )[0] video_processor = VideoProcessor(vae_scale_factor=8) has_latents_mean = hasattr(pipe.vae.config, "latents_mean") and pipe.vae.config.latents_mean is not None has_latents_std = hasattr(pipe.vae.config, "latents_std") and pipe.vae.config.latents_std is not None if has_latents_mean and has_latents_std: latents_mean = ( torch.tensor(pipe.vae.config.latents_mean).view(1, 12, 1, 1, 1).to(frames.device, frames.dtype) ) latents_std = ( torch.tensor(pipe.vae.config.latents_std).view(1, 12, 1, 1, 1).to(frames.device, frames.dtype) ) frames = frames * latents_std / pipe.vae.config.scaling_factor + latents_mean else: frames = frames / pipe.vae.config.scaling_factor with torch.no_grad(): video = pipe.vae.decode(frames.to(pipe.vae.dtype), return_dict=False)[0] video = video_processor.postprocess_video(video)[0] export_to_video(video, "mochi.mp4", fps=30) ``` ## Running inference with multiple GPUs It is possible to split the large Mochi transformer across multiple GPUs using the `device_map` and `max_memory` options in `from_pretrained`. In the following example we split the model across two GPUs, each with 24GB of VRAM. ```python import torch from diffusers import MochiPipeline, MochiTransformer3DModel from diffusers.utils import export_to_video model_id = "genmo/mochi-1-preview" transformer = MochiTransformer3DModel.from_pretrained( model_id, subfolder="transformer", device_map="auto", max_memory={0: "24GB", 1: "24GB"} ) pipe = MochiPipeline.from_pretrained(model_id, transformer=transformer) pipe.enable_model_cpu_offload() pipe.enable_vae_tiling() with torch.autocast(device_type="cuda", dtype=torch.bfloat16, cache_enabled=False): frames = pipe( prompt="Close-up of a chameleon's eye, with its scaly skin changing color. Ultra high resolution 4k.", negative_prompt="", height=480, width=848, num_frames=85, num_inference_steps=50, guidance_scale=4.5, num_videos_per_prompt=1, generator=torch.Generator(device="cuda").manual_seed(0), max_sequence_length=256, output_type="pil", ).frames[0] export_to_video(frames, "output.mp4", fps=30) ``` ## Using single file loading with the Mochi Transformer You can use `from_single_file` to load the Mochi transformer in its original format. <Tip> Diffusers currently doesn't support using the FP8 scaled versions of the Mochi single file checkpoints. 
</Tip> ```python import torch from diffusers import MochiPipeline, MochiTransformer3DModel from diffusers.utils import export_to_video model_id = "genmo/mochi-1-preview" ckpt_path = "https://huggingface.co/Comfy-Org/mochi_preview_repackaged/blob/main/split_files/diffusion_models/mochi_preview_bf16.safetensors" transformer = MochiTransformer3DModel.from_pretrained(ckpt_path, torch_dtype=torch.bfloat16) pipe = MochiPipeline.from_pretrained(model_id, transformer=transformer) pipe.enable_model_cpu_offload() pipe.enable_vae_tiling() with torch.autocast(device_type="cuda", dtype=torch.bfloat16, cache_enabled=False): frames = pipe( prompt="Close-up of a chameleon's eye, with its scaly skin changing color. Ultra high resolution 4k.", negative_prompt="", height=480, width=848, num_frames=85, num_inference_steps=50, guidance_scale=4.5, num_videos_per_prompt=1, generator=torch.Generator(device="cuda").manual_seed(0), max_sequence_length=256, output_type="pil", ).frames[0] export_to_video(frames, "output.mp4", fps=30) ``` ## MochiPipeline [[autodoc]] MochiPipeline - all - __call__ ## MochiPipelineOutput [[autodoc]] pipelines.mochi.pipeline_output.MochiPipelineOutput
diffusers/docs/source/en/api/pipelines/mochi.md/0
{ "file_path": "diffusers/docs/source/en/api/pipelines/mochi.md", "repo_id": "diffusers", "token_count": 3882 }
123
<!--Copyright 2025 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Shap-E The Shap-E model was proposed in [Shap-E: Generating Conditional 3D Implicit Functions](https://huggingface.co/papers/2305.02463) by Alex Nichol and Heewoo Jun from [OpenAI](https://github.com/openai). The abstract from the paper is: *We present Shap-E, a conditional generative model for 3D assets. Unlike recent work on 3D generative models which produce a single output representation, Shap-E directly generates the parameters of implicit functions that can be rendered as both textured meshes and neural radiance fields. We train Shap-E in two stages: first, we train an encoder that deterministically maps 3D assets into the parameters of an implicit function; second, we train a conditional diffusion model on outputs of the encoder. When trained on a large dataset of paired 3D and text data, our resulting models are capable of generating complex and diverse 3D assets in a matter of seconds. When compared to Point-E, an explicit generative model over point clouds, Shap-E converges faster and reaches comparable or better sample quality despite modeling a higher-dimensional, multi-representation output space.* The original codebase can be found at [openai/shap-e](https://github.com/openai/shap-e). <Tip> See the [reuse components across pipelines](../../using-diffusers/loading#reuse-a-pipeline) section to learn how to efficiently load the same components into multiple pipelines. </Tip> ## ShapEPipeline [[autodoc]] ShapEPipeline - all - __call__ ## ShapEImg2ImgPipeline [[autodoc]] ShapEImg2ImgPipeline - all - __call__ ## ShapEPipelineOutput [[autodoc]] pipelines.shap_e.pipeline_shap_e.ShapEPipelineOutput
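Since this page documents the pipelines without a usage snippet, here is a minimal text-to-3D sketch. The prompt, guidance scale, and frame size are illustrative values; the pipeline returns a list of rendered view frames per prompt, which can be stitched into a GIF.

```python
import torch
from diffusers import ShapEPipeline
from diffusers.utils import export_to_gif

pipe = ShapEPipeline.from_pretrained("openai/shap-e", torch_dtype=torch.float16).to("cuda")

images = pipe(
    "a firecracker",          # text prompt describing the 3D asset
    guidance_scale=15.0,
    num_inference_steps=64,
    frame_size=256,           # resolution of each rendered view
).images

export_to_gif(images[0], "firecracker.gif")
```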
diffusers/docs/source/en/api/pipelines/shap_e.md/0
{ "file_path": "diffusers/docs/source/en/api/pipelines/shap_e.md", "repo_id": "diffusers", "token_count": 590 }
124
<!--Copyright 2025 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Installation Diffusers is tested on Python 3.8+, PyTorch 1.4+, and Flax 0.4.1+. Follow the installation instructions for the deep learning library you're using, [PyTorch](https://pytorch.org/get-started/locally/) or [Flax](https://flax.readthedocs.io/en/latest/). Create a [virtual environment](https://packaging.python.org/guides/installing-using-pip-and-virtual-environments/) for easier management of separate projects and to avoid compatibility issues between dependencies. Use [uv](https://docs.astral.sh/uv/), a Rust-based Python package and project manager, to create a virtual environment and install Diffusers. ```bash uv venv my-env source my-env/bin/activate ``` Install Diffusers with one of the following methods. <hfoptions id="install"> <hfoption id="pip"> PyTorch only supports Python 3.8 - 3.11 on Windows. ```bash uv pip install diffusers["torch"] transformers ``` Use the command below for Flax. ```bash uv pip install diffusers["flax"] transformers ``` </hfoption> <hfoption id="conda"> ```bash conda install -c conda-forge diffusers ``` </hfoption> <hfoption id="source"> A source install installs the `main` version instead of the latest `stable` version. The `main` version is useful for staying updated with the latest changes but it may not always be stable. If you run into a problem, open an [Issue](https://github.com/huggingface/diffusers/issues/new/choose) and we will try to resolve it as soon as possible. Make sure [Accelerate](https://huggingface.co/docs/accelerate/index) is installed. ```bash uv pip install accelerate ``` Install Diffusers from source with the command below. ```bash uv pip install git+https://github.com/huggingface/diffusers ``` </hfoption> </hfoptions> ## Editable install An editable install is recommended for development workflows or if you're using the `main` version of the source code. A special link is created between the cloned repository and the Python library paths. This avoids reinstalling a package after every change. Clone the repository and install Diffusers with the following commands. <hfoptions id="editable"> <hfoption id="PyTorch"> ```bash git clone https://github.com/huggingface/diffusers.git cd diffusers uv pip install -e ".[torch]" ``` </hfoption> <hfoption id="Flax"> ```bash git clone https://github.com/huggingface/diffusers.git cd diffusers uv pip install -e ".[flax]" ``` </hfoption> </hfoptions> > [!WARNING] > You must keep the `diffusers` folder if you want to keep using the library with the editable install. Update your cloned repository to the latest version of Diffusers with the command below. ```bash cd ~/diffusers/ git pull ``` ## Cache Model weights and files are downloaded from the Hub to a cache, which is usually your home directory. 
Change the cache location with the [HF_HOME](https://huggingface.co/docs/huggingface_hub/package_reference/environment_variables#hfhome) or [HF_HUB_CACHE](https://huggingface.co/docs/huggingface_hub/package_reference/environment_variables#hfhubcache) environment variables or configuring the `cache_dir` parameter in methods like [`~DiffusionPipeline.from_pretrained`]. <hfoptions id="cache"> <hfoption id="env variable"> ```bash export HF_HOME="/path/to/your/cache" export HF_HUB_CACHE="/path/to/your/hub/cache" ``` </hfoption> <hfoption id="from_pretrained"> ```py from diffusers import DiffusionPipeline pipeline = DiffusionPipeline.from_pretrained( "black-forest-labs/FLUX.1-dev", cache_dir="/path/to/your/cache" ) ``` </hfoption> </hfoptions> Cached files allow you to use Diffusers offline. Set the [HF_HUB_OFFLINE](https://huggingface.co/docs/huggingface_hub/package_reference/environment_variables#hfhuboffline) environment variable to `1` to prevent Diffusers from connecting to the internet. ```shell export HF_HUB_OFFLINE=1 ``` For more details about managing and cleaning the cache, take a look at the [Understand caching](https://huggingface.co/docs/huggingface_hub/guides/manage-cache) guide. ## Telemetry logging Diffusers gathers telemetry information during [`~DiffusionPipeline.from_pretrained`] requests. The data gathered includes the Diffusers and PyTorch/Flax version, the requested model or pipeline class, and the path to a pretrained checkpoint if it is hosted on the Hub. This usage data helps us debug issues and prioritize new features. Telemetry is only sent when loading models and pipelines from the Hub, and it is not collected if you're loading local files. Opt-out and disable telemetry collection with the [HF_HUB_DISABLE_TELEMETRY](https://huggingface.co/docs/huggingface_hub/package_reference/environment_variables#hfhubdisabletelemetry) environment variable. <hfoptions id="telemetry"> <hfoption id="Linux/macOS"> ```bash export HF_HUB_DISABLE_TELEMETRY=1 ``` </hfoption> <hfoption id="Windows"> ```bash set HF_HUB_DISABLE_TELEMETRY=1 ``` </hfoption> </hfoptions>
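As a small complement to the cache and offline notes above, the snippet below shows the per-call equivalent of `HF_HUB_OFFLINE`: the `local_files_only` flag. It reuses the checkpoint name from the cache example purely for illustration.

```py
from diffusers import DiffusionPipeline

# Succeeds only if the checkpoint is already in the local cache;
# no network requests are made.
pipeline = DiffusionPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-dev",
    local_files_only=True,
)
```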
diffusers/docs/source/en/installation.md/0
{ "file_path": "diffusers/docs/source/en/installation.md", "repo_id": "diffusers", "token_count": 1679 }
125
<!-- Copyright 2025 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # torchao [TorchAO](https://github.com/pytorch/ao) is an architecture optimization library for PyTorch. It provides high-performance dtypes, optimization techniques, and kernels for inference and training, featuring composability with native PyTorch features like [torch.compile](https://pytorch.org/tutorials/intermediate/torch_compile_tutorial.html), FullyShardedDataParallel (FSDP), and more. Before you begin, make sure you have Pytorch 2.5+ and TorchAO installed. ```bash pip install -U torch torchao ``` Quantize a model by passing [`TorchAoConfig`] to [`~ModelMixin.from_pretrained`] (you can also load pre-quantized models). This works for any model in any modality, as long as it supports loading with [Accelerate](https://hf.co/docs/accelerate/index) and contains `torch.nn.Linear` layers. The example below only quantizes the weights to int8. ```python import torch from diffusers import FluxPipeline, AutoModel, TorchAoConfig model_id = "black-forest-labs/FLUX.1-dev" dtype = torch.bfloat16 quantization_config = TorchAoConfig("int8wo") transformer = AutoModel.from_pretrained( model_id, subfolder="transformer", quantization_config=quantization_config, torch_dtype=dtype, ) pipe = FluxPipeline.from_pretrained( model_id, transformer=transformer, torch_dtype=dtype, ) pipe.to("cuda") # Without quantization: ~31.447 GB # With quantization: ~20.40 GB print(f"Pipeline memory usage: {torch.cuda.max_memory_reserved() / 1024**3:.3f} GB") prompt = "A cat holding a sign that says hello world" image = pipe( prompt, num_inference_steps=50, guidance_scale=4.5, max_sequence_length=512 ).images[0] image.save("output.png") ``` TorchAO is fully compatible with [torch.compile](../optimization/fp16#torchcompile), setting it apart from other quantization methods. This makes it easy to speed up inference with just one line of code. ```python # In the above code, add the following after initializing the transformer transformer = torch.compile(transformer, mode="max-autotune", fullgraph=True) ``` For speed and memory benchmarks on Flux and CogVideoX, please refer to the table [here](https://github.com/huggingface/diffusers/pull/10009#issue-2688781450). You can also find some torchao [benchmarks](https://github.com/pytorch/ao/tree/main/torchao/quantization#benchmarks) numbers for various hardware. > [!TIP] > The FP8 post-training quantization schemes in torchao are effective for GPUs with compute capability of at least 8.9 (RTX-4090, Hopper, etc.). FP8 often provides the best speed, memory, and quality trade-off when generating images and videos. We recommend combining FP8 and torch.compile if your GPU is compatible. torchao also supports an automatic quantization API through [autoquant](https://github.com/pytorch/ao/blob/main/torchao/quantization/README.md#autoquantization). Autoquantization determines the best quantization strategy applicable to a model by comparing the performance of each technique on chosen input types and shapes. 
Currently, this can be used directly on the underlying modeling components. Diffusers will also expose an autoquant configuration option in the future. The `TorchAoConfig` class accepts three parameters: - `quant_type`: A string value mentioning one of the quantization types below. - `modules_to_not_convert`: A list of module full/partial module names for which quantization should not be performed. For example, to not perform any quantization of the [`FluxTransformer2DModel`]'s first block, one would specify: `modules_to_not_convert=["single_transformer_blocks.0"]`. - `kwargs`: A dict of keyword arguments to pass to the underlying quantization method which will be invoked based on `quant_type`. ## Supported quantization types torchao supports weight-only quantization and weight and dynamic-activation quantization for int8, float3-float8, and uint1-uint7. Weight-only quantization stores the model weights in a specific low-bit data type but performs computation with a higher-precision data type, like `bfloat16`. This lowers the memory requirements from model weights but retains the memory peaks for activation computation. Dynamic activation quantization stores the model weights in a low-bit dtype, while also quantizing the activations on-the-fly to save additional memory. This lowers the memory requirements from model weights, while also lowering the memory overhead from activation computations. However, this may come at a quality tradeoff at times, so it is recommended to test different models thoroughly. The quantization methods supported are as follows: | **Category** | **Full Function Names** | **Shorthands** | |--------------|-------------------------|----------------| | **Integer quantization** | `int4_weight_only`, `int8_dynamic_activation_int4_weight`, `int8_weight_only`, `int8_dynamic_activation_int8_weight` | `int4wo`, `int4dq`, `int8wo`, `int8dq` | | **Floating point 8-bit quantization** | `float8_weight_only`, `float8_dynamic_activation_float8_weight`, `float8_static_activation_float8_weight` | `float8wo`, `float8wo_e5m2`, `float8wo_e4m3`, `float8dq`, `float8dq_e4m3`, `float8dq_e4m3_tensor`, `float8dq_e4m3_row` | | **Floating point X-bit quantization** | `fpx_weight_only` | `fpX_eAwB` where `X` is the number of bits (1-7), `A` is exponent bits, and `B` is mantissa bits. Constraint: `X == A + B + 1` | | **Unsigned Integer quantization** | `uintx_weight_only` | `uint1wo`, `uint2wo`, `uint3wo`, `uint4wo`, `uint5wo`, `uint6wo`, `uint7wo` | Some quantization methods are aliases (for example, `int8wo` is the commonly used shorthand for `int8_weight_only`). This allows using the quantization methods described in the torchao docs as-is, while also making it convenient to remember their shorthand notations. Refer to the [official torchao documentation](https://docs.pytorch.org/ao/stable/index.html) for a better understanding of the available quantization methods and the exhaustive list of configuration options available. ## Serializing and Deserializing quantized models To serialize a quantized model in a given dtype, first load the model with the desired quantization dtype and then save it using the [`~ModelMixin.save_pretrained`] method. 
```python import torch from diffusers import AutoModel, TorchAoConfig quantization_config = TorchAoConfig("int8wo") transformer = AutoModel.from_pretrained( "black-forest-labs/Flux.1-Dev", subfolder="transformer", quantization_config=quantization_config, torch_dtype=torch.bfloat16, ) transformer.save_pretrained("/path/to/flux_int8wo", safe_serialization=False) ``` To load a serialized quantized model, use the [`~ModelMixin.from_pretrained`] method. ```python import torch from diffusers import FluxPipeline, AutoModel transformer = AutoModel.from_pretrained("/path/to/flux_int8wo", torch_dtype=torch.bfloat16, use_safetensors=False) pipe = FluxPipeline.from_pretrained("black-forest-labs/Flux.1-Dev", transformer=transformer, torch_dtype=torch.bfloat16) pipe.to("cuda") prompt = "A cat holding a sign that says hello world" image = pipe(prompt, num_inference_steps=30, guidance_scale=7.0).images[0] image.save("output.png") ``` If you are using `torch<=2.6.0`, some quantization methods, such as `uint4wo`, cannot be loaded directly and may result in an `UnpicklingError` when trying to load the models, but work as expected when saving them. In order to work around this, one can load the state dict manually into the model. Note, however, that this requires using `weights_only=False` in `torch.load`, so it should be run only if the weights were obtained from a trustable source. ```python import torch from accelerate import init_empty_weights from diffusers import FluxPipeline, AutoModel, TorchAoConfig # Serialize the model transformer = AutoModel.from_pretrained( "black-forest-labs/Flux.1-Dev", subfolder="transformer", quantization_config=TorchAoConfig("uint4wo"), torch_dtype=torch.bfloat16, ) transformer.save_pretrained("/path/to/flux_uint4wo", safe_serialization=False, max_shard_size="50GB") # ... # Load the model state_dict = torch.load("/path/to/flux_uint4wo/diffusion_pytorch_model.bin", weights_only=False, map_location="cpu") with init_empty_weights(): transformer = AutoModel.from_config("/path/to/flux_uint4wo/config.json") transformer.load_state_dict(state_dict, strict=True, assign=True) ``` > [!TIP] > The [`AutoModel`] API is supported for PyTorch >= 2.6 as shown in the examples below. ## Resources - [TorchAO Quantization API](https://docs.pytorch.org/ao/stable/index.html) - [Diffusers-TorchAO examples](https://github.com/sayakpaul/diffusers-torchao)
diffusers/docs/source/en/quantization/torchao.md/0
{ "file_path": "diffusers/docs/source/en/quantization/torchao.md", "repo_id": "diffusers", "token_count": 2770 }
126
<!--Copyright 2025 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Stable Diffusion XL <Tip warning={true}> This script is experimental, and it's easy to overfit and run into issues like catastrophic forgetting. Try exploring different hyperparameters to get the best results on your dataset. </Tip> [Stable Diffusion XL (SDXL)](https://hf.co/papers/2307.01952) is a larger and more powerful iteration of the Stable Diffusion model, capable of producing higher resolution images. SDXL's UNet is 3x larger and the model adds a second text encoder to the architecture. Depending on the hardware available to you, this can be very computationally intensive and it may not run on a consumer GPU like a Tesla T4. To help fit this larger model into memory and to speedup training, try enabling `gradient_checkpointing`, `mixed_precision`, and `gradient_accumulation_steps`. You can reduce your memory-usage even more by enabling memory-efficient attention with [xFormers](../optimization/xformers) and using [bitsandbytes'](https://github.com/TimDettmers/bitsandbytes) 8-bit optimizer. This guide will explore the [train_text_to_image_sdxl.py](https://github.com/huggingface/diffusers/blob/main/examples/text_to_image/train_text_to_image_sdxl.py) training script to help you become more familiar with it, and how you can adapt it for your own use-case. Before running the script, make sure you install the library from source: ```bash git clone https://github.com/huggingface/diffusers cd diffusers pip install . ``` Then navigate to the example folder containing the training script and install the required dependencies for the script you're using: ```bash cd examples/text_to_image pip install -r requirements_sdxl.txt ``` <Tip> 🤗 Accelerate is a library for helping you train on multiple GPUs/TPUs or with mixed-precision. It'll automatically configure your training setup based on your hardware and environment. Take a look at the 🤗 Accelerate [Quick tour](https://huggingface.co/docs/accelerate/quicktour) to learn more. </Tip> Initialize an 🤗 Accelerate environment: ```bash accelerate config ``` To setup a default 🤗 Accelerate environment without choosing any configurations: ```bash accelerate config default ``` Or if your environment doesn't support an interactive shell, like a notebook, you can use: ```py from accelerate.utils import write_basic_config write_basic_config() ``` Lastly, if you want to train a model on your own dataset, take a look at the [Create a dataset for training](create_dataset) guide to learn how to create a dataset that works with the training script. ## Script parameters <Tip> The following sections highlight parts of the training script that are important for understanding how to modify it, but it doesn't cover every aspect of the script in detail. If you're interested in learning more, feel free to read through the [script](https://github.com/huggingface/diffusers/blob/main/examples/text_to_image/train_text_to_image_sdxl.py) and let us know if you have any questions or concerns. 
</Tip>

The training script provides many parameters to help you customize your training run. All of the parameters and their descriptions are found in the [`parse_args()`](https://github.com/huggingface/diffusers/blob/aab6de22c33cc01fb7bc81c0807d6109e2c998c9/examples/text_to_image/train_text_to_image_sdxl.py#L129) function. This function provides default values for each parameter, such as the training batch size and learning rate, but you can also set your own values in the training command if you'd like.

For example, to speed up training with mixed precision using the bf16 format, add the `--mixed_precision` parameter to the training command:

```bash
accelerate launch train_text_to_image_sdxl.py \
  --mixed_precision="bf16"
```

Most of the parameters are identical to the parameters in the [Text-to-image](text2image#script-parameters) training guide, so you'll focus on the parameters that are relevant to training SDXL in this guide.

- `--pretrained_vae_model_name_or_path`: path to a pretrained VAE; the SDXL VAE is known to suffer from numerical instability, so this parameter allows you to specify a better [VAE](https://huggingface.co/madebyollin/sdxl-vae-fp16-fix)
- `--proportion_empty_prompts`: the proportion of image prompts to replace with empty strings
- `--timestep_bias_strategy`: where (earlier vs. later) in the timestep to apply a bias, which can encourage the model to either learn low or high frequency details
- `--timestep_bias_multiplier`: the weight of the bias to apply to the timestep
- `--timestep_bias_begin`: the timestep to begin applying the bias
- `--timestep_bias_end`: the timestep to end applying the bias
- `--timestep_bias_portion`: the proportion of timesteps to apply the bias to

### Min-SNR weighting

The [Min-SNR](https://huggingface.co/papers/2303.09556) weighting strategy can help with training by rebalancing the loss to achieve faster convergence. The training script supports predicting either `epsilon` (noise) or `v_prediction`, and Min-SNR is compatible with both prediction types. This weighting strategy is only supported by PyTorch and is unavailable in the Flax training script.

Add the `--snr_gamma` parameter and set it to the recommended value of 5.0:

```bash
accelerate launch train_text_to_image_sdxl.py \
  --snr_gamma=5.0
```

## Training script

The training script is also similar to the [Text-to-image](text2image#training-script) training guide, but it's been modified to support SDXL training. This guide will focus on the code that is unique to the SDXL training script.

It starts by creating functions to [tokenize the prompts](https://github.com/huggingface/diffusers/blob/aab6de22c33cc01fb7bc81c0807d6109e2c998c9/examples/text_to_image/train_text_to_image_sdxl.py#L478) to calculate the prompt embeddings, and to compute the image embeddings with the [VAE](https://github.com/huggingface/diffusers/blob/aab6de22c33cc01fb7bc81c0807d6109e2c998c9/examples/text_to_image/train_text_to_image_sdxl.py#L519). Next, you'll see a function to [generate the timestep weights](https://github.com/huggingface/diffusers/blob/aab6de22c33cc01fb7bc81c0807d6109e2c998c9/examples/text_to_image/train_text_to_image_sdxl.py#L531) depending on the number of timesteps and the timestep bias strategy to apply; a simplified sketch of this idea is shown below.
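To build intuition for what such a weighting can look like, here is a minimal, simplified sketch (the function name, defaults, and exact bias shape are illustrative assumptions, not the script's actual implementation):

```python
import torch

def sketch_timestep_weights(num_timesteps: int, strategy: str = "later",
                            portion: float = 0.25, multiplier: float = 2.0) -> torch.Tensor:
    # Illustrative only: upweight a slice of timesteps so they are sampled more often
    weights = torch.ones(num_timesteps)
    num_biased = int(num_timesteps * portion)
    if strategy == "later":
        weights[-num_biased:] *= multiplier   # favor later (noisier) timesteps
    elif strategy == "earlier":
        weights[:num_biased] *= multiplier    # favor earlier (less noisy) timesteps
    return weights / weights.sum()            # normalize into a sampling distribution

weights = sketch_timestep_weights(1000, strategy="later")
timesteps = torch.multinomial(weights, num_samples=4, replacement=True).long()
print(timesteps)
```

The actual script exposes these knobs through the `--timestep_bias_*` flags described above; refer to the linked code for the exact behavior.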
Within the [`main()`](https://github.com/huggingface/diffusers/blob/aab6de22c33cc01fb7bc81c0807d6109e2c998c9/examples/text_to_image/train_text_to_image_sdxl.py#L572) function, in addition to loading a tokenizer, the script loads a second tokenizer and text encoder because the SDXL architecture uses two of each: ```py tokenizer_one = AutoTokenizer.from_pretrained( args.pretrained_model_name_or_path, subfolder="tokenizer", revision=args.revision, use_fast=False ) tokenizer_two = AutoTokenizer.from_pretrained( args.pretrained_model_name_or_path, subfolder="tokenizer_2", revision=args.revision, use_fast=False ) text_encoder_cls_one = import_model_class_from_model_name_or_path( args.pretrained_model_name_or_path, args.revision ) text_encoder_cls_two = import_model_class_from_model_name_or_path( args.pretrained_model_name_or_path, args.revision, subfolder="text_encoder_2" ) ``` The [prompt and image embeddings](https://github.com/huggingface/diffusers/blob/aab6de22c33cc01fb7bc81c0807d6109e2c998c9/examples/text_to_image/train_text_to_image_sdxl.py#L857) are computed first and kept in memory, which isn't typically an issue for a smaller dataset, but for larger datasets it can lead to memory problems. If this is the case, you should save the pre-computed embeddings to disk separately and load them into memory during the training process (see this [PR](https://github.com/huggingface/diffusers/pull/4505) for more discussion about this topic). ```py text_encoders = [text_encoder_one, text_encoder_two] tokenizers = [tokenizer_one, tokenizer_two] compute_embeddings_fn = functools.partial( encode_prompt, text_encoders=text_encoders, tokenizers=tokenizers, proportion_empty_prompts=args.proportion_empty_prompts, caption_column=args.caption_column, ) train_dataset = train_dataset.map(compute_embeddings_fn, batched=True, new_fingerprint=new_fingerprint) train_dataset = train_dataset.map( compute_vae_encodings_fn, batched=True, batch_size=args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps, new_fingerprint=new_fingerprint_for_vae, ) ``` After calculating the embeddings, the text encoder, VAE, and tokenizer are deleted to free up some memory: ```py del text_encoders, tokenizers, vae gc.collect() torch.cuda.empty_cache() ``` Finally, the [training loop](https://github.com/huggingface/diffusers/blob/aab6de22c33cc01fb7bc81c0807d6109e2c998c9/examples/text_to_image/train_text_to_image_sdxl.py#L943) takes care of the rest. If you chose to apply a timestep bias strategy, you'll see the timestep weights are calculated and added as noise: ```py weights = generate_timestep_weights(args, noise_scheduler.config.num_train_timesteps).to( model_input.device ) timesteps = torch.multinomial(weights, bsz, replacement=True).long() noisy_model_input = noise_scheduler.add_noise(model_input, noise, timesteps) ``` If you want to learn more about how the training loop works, check out the [Understanding pipelines, models and schedulers](../using-diffusers/write_own_pipeline) tutorial which breaks down the basic pattern of the denoising process. ## Launch the script Once you’ve made all your changes or you’re okay with the default configuration, you’re ready to launch the training script! 🚀 Let’s train on the [Naruto BLIP captions](https://huggingface.co/datasets/lambdalabs/naruto-blip-captions) dataset to generate your own Naruto characters. Set the environment variables `MODEL_NAME` and `DATASET_NAME` to the model and the dataset (either from the Hub or a local path). 
You should also specify a VAE other than the SDXL VAE (either from the Hub or a local path) with `VAE_NAME` to avoid numerical instabilities.

<Tip>

To monitor training progress with Weights & Biases, add the `--report_to=wandb` parameter to the training command. You'll also need to add the `--validation_prompt` and `--validation_epochs` to the training command to keep track of results. This can be really useful for debugging the model and viewing intermediate results.

</Tip>

```bash
export MODEL_NAME="stabilityai/stable-diffusion-xl-base-1.0"
export VAE_NAME="madebyollin/sdxl-vae-fp16-fix"
export DATASET_NAME="lambdalabs/naruto-blip-captions"

accelerate launch train_text_to_image_sdxl.py \
  --pretrained_model_name_or_path=$MODEL_NAME \
  --pretrained_vae_model_name_or_path=$VAE_NAME \
  --dataset_name=$DATASET_NAME \
  --enable_xformers_memory_efficient_attention \
  --resolution=512 \
  --center_crop \
  --random_flip \
  --proportion_empty_prompts=0.2 \
  --train_batch_size=1 \
  --gradient_accumulation_steps=4 \
  --gradient_checkpointing \
  --max_train_steps=10000 \
  --use_8bit_adam \
  --learning_rate=1e-06 \
  --lr_scheduler="constant" \
  --lr_warmup_steps=0 \
  --mixed_precision="fp16" \
  --report_to="wandb" \
  --validation_prompt="a cute Sundar Pichai creature" \
  --validation_epochs 5 \
  --checkpointing_steps=5000 \
  --output_dir="sdxl-naruto-model" \
  --push_to_hub
```

After you've finished training, you can use your newly trained SDXL model for inference!

<hfoptions id="inference">
<hfoption id="PyTorch">

```py
from diffusers import DiffusionPipeline
import torch

pipeline = DiffusionPipeline.from_pretrained("path/to/your/model", torch_dtype=torch.float16).to("cuda")

prompt = "A naruto with green eyes and red legs."
image = pipeline(prompt, num_inference_steps=30, guidance_scale=7.5).images[0]
image.save("naruto.png")
```

</hfoption>
<hfoption id="PyTorch XLA">

[PyTorch XLA](https://pytorch.org/xla) allows you to run PyTorch on XLA devices such as TPUs, which can be faster. The initial warmup step takes longer because the model needs to be compiled and optimized. However, subsequent calls to the pipeline on an input **with the same length** as the original prompt are much faster because it can reuse the optimized graph.

```py
from time import time

import torch
import torch_xla.core.xla_model as xm
from diffusers import DiffusionPipeline

device = xm.xla_device()
pipeline = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0").to(device)

prompt = "A naruto with green eyes and red legs."
inference_steps = 30

start = time()
image = pipeline(prompt, num_inference_steps=inference_steps).images[0]
print(f'Compilation time is {time()-start} sec')
image.save("naruto.png")

start = time()
image = pipeline(prompt, num_inference_steps=inference_steps).images[0]
print(f'Inference time is {time()-start} sec after compilation')
```

</hfoption>
</hfoptions>

## Next steps

Congratulations on training an SDXL model! To learn more about how to use your new model, the following guides may be helpful:

- Read the [Stable Diffusion XL](../using-diffusers/sdxl) guide to learn how to use it for a variety of different tasks (text-to-image, image-to-image, inpainting), how to use its refiner model (a short inference sketch follows this list), and the different types of micro-conditionings.
- Check out the [DreamBooth](dreambooth) and [LoRA](lora) training guides to learn how to train a personalized SDXL model with just a few example images. These two training techniques can even be combined!
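For the refiner mentioned in the first bullet, a minimal sketch of the base + refiner handoff could look like this (the refiner checkpoint and the 0.8 denoising split are common defaults, not something this training script produces):

```python
import torch
from diffusers import DiffusionPipeline

# Base model: your fine-tuned SDXL checkpoint
base = DiffusionPipeline.from_pretrained("path/to/your/model", torch_dtype=torch.float16).to("cuda")

# Refiner: reuses the base model's second text encoder and VAE, and sharpens fine details
refiner = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-refiner-1.0",
    text_encoder_2=base.text_encoder_2,
    vae=base.vae,
    torch_dtype=torch.float16,
).to("cuda")

prompt = "A naruto with green eyes and red legs."
# Run the base model for the first 80% of the steps and hand the latents to the refiner
latents = base(prompt, num_inference_steps=30, denoising_end=0.8, output_type="latent").images
image = refiner(prompt, num_inference_steps=30, denoising_start=0.8, image=latents).images[0]
image.save("naruto_refined.png")
```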
diffusers/docs/source/en/training/sdxl.md/0
{ "file_path": "diffusers/docs/source/en/training/sdxl.md", "repo_id": "diffusers", "token_count": 4393 }
127
<!--Copyright 2025 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -->

[[open-in-colab]]

# Community pipelines and components

Community pipelines are [`DiffusionPipeline`] classes that are different from the original paper implementation. They provide additional functionality or extend the original pipeline implementation.

> [!TIP]
> Check out the community pipelines in [diffusers/examples/community](https://github.com/huggingface/diffusers/tree/main/examples/community) with inference and training examples for how to use them.

Community pipelines are stored either on the Hub or in the Diffusers GitHub repository. Hub pipelines are completely customizable (scheduler, models, pipeline code, etc.) while GitHub pipelines are limited to only the custom pipeline code. Further compare the two community pipeline types in the table below.

|  | GitHub | Hub |
|---|---|---|
| Usage | Same. | Same. |
| Review process | Open a Pull Request on GitHub and undergo a review process from the Diffusers team before merging. This option is slower. | Upload directly to a Hub repository without a review. This is the fastest option. |
| Visibility | Included in the official Diffusers repository and docs. | Included on your Hub profile and relies on your own usage and promotion to gain visibility. |

## custom_pipeline

Load either community pipeline type by passing the `custom_pipeline` argument to [`~DiffusionPipeline.from_pretrained`].

```py
import torch
from diffusers import DiffusionPipeline

pipeline = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-3-medium-diffusers",
    custom_pipeline="pipeline_stable_diffusion_3_instruct_pix2pix",
    torch_dtype=torch.float16,
    device_map="cuda"
)
```

Add the `custom_revision` argument to [`~DiffusionPipeline.from_pretrained`] to load a community pipeline from a specific version (for example, `v0.30.0` or `main`). By default, community pipelines are loaded from the latest stable version of Diffusers.

```py
import torch
from diffusers import DiffusionPipeline

pipeline = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-3-medium-diffusers",
    custom_pipeline="pipeline_stable_diffusion_3_instruct_pix2pix",
    custom_revision="main",
    torch_dtype=torch.float16,
    device_map="cuda"
)
```

> [!WARNING]
> While the Hugging Face Hub [scans](https://huggingface.co/docs/hub/security-malware) files, you should still inspect the Hub pipeline code and make sure it is safe.

There are a few ways to load a community pipeline.

- Pass a path to `custom_pipeline` to load a local community pipeline. The directory must contain a `pipeline.py` file containing the pipeline class.
```py
import torch
from diffusers import DiffusionPipeline

pipeline = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-3-medium-diffusers",
    custom_pipeline="path/to/pipeline_directory",
    torch_dtype=torch.float16,
    device_map="cuda"
)
```

- The `custom_pipeline` argument is also supported by [`~DiffusionPipeline.from_pipe`], which is useful for [reusing pipelines](./loading#reuse-a-pipeline) without using additional memory. It limits the memory usage to only the largest pipeline loaded.

```py
import torch
from diffusers import DiffusionPipeline

pipeline_sd = DiffusionPipeline.from_pretrained("emilianJR/CyberRealistic_V3", torch_dtype=torch.float16, device_map="cuda")
pipeline_lpw = DiffusionPipeline.from_pipe(
    pipeline_sd,
    custom_pipeline="lpw_stable_diffusion",
    device_map="cuda"
)
```

The [`~DiffusionPipeline.from_pipe`] method is especially useful for loading community pipelines because many of them don't have pretrained weights. Community pipelines generally add a feature on top of an existing pipeline.

## Community components

Community components let users build pipelines with custom transformers, UNets, VAEs, and schedulers not supported by Diffusers. These components require Python module implementations. This section shows how users can use community components to build a community pipeline, using [showlab/show-1-base](https://huggingface.co/showlab/show-1-base) as an example.

1. Load the required components, such as the tokenizer, text encoder, scheduler, and image processor. The text encoder is generally imported from [Transformers](https://huggingface.co/docs/transformers/index).

```python
from transformers import T5Tokenizer, T5EncoderModel, CLIPImageProcessor
from diffusers import DPMSolverMultistepScheduler

pipeline_id = "showlab/show-1-base"

tokenizer = T5Tokenizer.from_pretrained(pipeline_id, subfolder="tokenizer")
text_encoder = T5EncoderModel.from_pretrained(pipeline_id, subfolder="text_encoder")
scheduler = DPMSolverMultistepScheduler.from_pretrained(pipeline_id, subfolder="scheduler")
feature_extractor = CLIPImageProcessor.from_pretrained(pipeline_id, subfolder="feature_extractor")
```

> [!WARNING]
> In steps 2 and 3, the custom [UNet](https://github.com/showlab/Show-1/blob/main/showone/models/unet_3d_condition.py) and [pipeline](https://huggingface.co/sayakpaul/show-1-base-with-code/blob/main/unet/showone_unet_3d_condition.py) implementation must match the format shown in their files for this example to work.

2. Load a [custom UNet](https://github.com/showlab/Show-1/blob/main/showone/models/unet_3d_condition.py) which is already implemented in [showone_unet_3d_condition.py](https://huggingface.co/sayakpaul/show-1-base-with-code/blob/main/unet/showone_unet_3d_condition.py). The [`UNet3DConditionModel`] class is renamed to the custom implementation, `ShowOneUNet3DConditionModel`, because [`UNet3DConditionModel`] already exists in Diffusers. Any components required for the `ShowOneUNet3DConditionModel` class should be placed in `showone_unet_3d_condition.py`.

```python
from showone_unet_3d_condition import ShowOneUNet3DConditionModel

unet = ShowOneUNet3DConditionModel.from_pretrained(pipeline_id, subfolder="unet")
```

3. Load the custom pipeline code (already implemented in [pipeline_t2v_base_pixel.py](https://huggingface.co/sayakpaul/show-1-base-with-code/blob/main/pipeline_t2v_base_pixel.py)). This script contains a custom `TextToVideoIFPipeline` class for generating videos from text.
Like the custom UNet, any code required for `TextToVideoIFPipeline` should be placed in `pipeline_t2v_base_pixel.py`. Initialize `TextToVideoIFPipeline` with `ShowOneUNet3DConditionModel`.

```python
import torch
from pipeline_t2v_base_pixel import TextToVideoIFPipeline

pipeline = TextToVideoIFPipeline(
    unet=unet,
    text_encoder=text_encoder,
    tokenizer=tokenizer,
    scheduler=scheduler,
    feature_extractor=feature_extractor
)
pipeline = pipeline.to("cuda", torch.float16)
```

4. Push the pipeline to the Hub to share with the community.

```python
pipeline.push_to_hub("custom-t2v-pipeline")
```

After the pipeline is successfully pushed, make the following changes.

- Change the `_class_name` attribute in [model_index.json](https://huggingface.co/sayakpaul/show-1-base-with-code/blob/main/model_index.json#L2) to `"pipeline_t2v_base_pixel"` and `"TextToVideoIFPipeline"`.
- Upload `showone_unet_3d_condition.py` to the [unet](https://huggingface.co/sayakpaul/show-1-base-with-code/blob/main/unet/showone_unet_3d_condition.py) subfolder.
- Upload `pipeline_t2v_base_pixel.py` to the pipeline [repository](https://huggingface.co/sayakpaul/show-1-base-with-code/tree/main).

To run inference, add the `trust_remote_code` argument while initializing the pipeline to handle all the "magic" behind the scenes.

```python
import torch
from diffusers import DiffusionPipeline

pipeline = DiffusionPipeline.from_pretrained(
    "<change-username>/<change-id>", trust_remote_code=True, torch_dtype=torch.float16
)
```

> [!WARNING]
> As an additional precaution with `trust_remote_code=True`, we strongly encourage passing a commit hash to the `revision` argument in [`~DiffusionPipeline.from_pretrained`] to make sure the code hasn't been updated with new malicious code (unless you fully trust the model owners). A minimal sketch of this pattern is included at the end of this page.

## Resources

- Take a look at Issue [#841](https://github.com/huggingface/diffusers/issues/841) for more context about why we're adding community pipelines to help everyone easily share their work without being slowed down.
- Check out the [stabilityai/japanese-stable-diffusion-xl](https://huggingface.co/stabilityai/japanese-stable-diffusion-xl/) repository for an additional example of a community pipeline that also uses the `trust_remote_code` feature.
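As a quick reference for the pattern recommended in the warning above, a minimal sketch (the repository id and commit hash are placeholders to replace with your own values):

```python
import torch
from diffusers import DiffusionPipeline

pipeline = DiffusionPipeline.from_pretrained(
    "<change-username>/<change-id>",   # placeholder repository id
    trust_remote_code=True,
    revision="<commit-hash>",          # pin the exact pipeline code you reviewed
    torch_dtype=torch.float16,
)
```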
diffusers/docs/source/en/using-diffusers/custom_pipeline_overview.md/0
{ "file_path": "diffusers/docs/source/en/using-diffusers/custom_pipeline_overview.md", "repo_id": "diffusers", "token_count": 2793 }
128
<!--Copyright 2025 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -->

# Push files to the Hub

[[open-in-colab]]

🤗 Diffusers provides a [`~diffusers.utils.PushToHubMixin`] for uploading your model, scheduler, or pipeline to the Hub. It is an easy way to store your files on the Hub, and also allows you to share your work with others. Under the hood, the [`~diffusers.utils.PushToHubMixin`]:

1. creates a repository on the Hub
2. saves your model, scheduler, or pipeline files so they can be reloaded later
3. uploads the folder containing these files to the Hub

This guide will show you how to use the [`~diffusers.utils.PushToHubMixin`] to upload your files to the Hub.

You'll need to log in to your Hub account with your access [token](https://huggingface.co/settings/tokens) first:

```py
from huggingface_hub import notebook_login

notebook_login()
```

## Models

To push a model to the Hub, call [`~diffusers.utils.PushToHubMixin.push_to_hub`] and specify the repository id of the model to be stored on the Hub:

```py
from diffusers import ControlNetModel

controlnet = ControlNetModel(
    block_out_channels=(32, 64),
    layers_per_block=2,
    in_channels=4,
    down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
    cross_attention_dim=32,
    conditioning_embedding_out_channels=(16, 32),
)
controlnet.push_to_hub("my-controlnet-model")
```

For models, you can also specify the [*variant*](loading#checkpoint-variants) of the weights to push to the Hub. For example, to push `fp16` weights:

```py
controlnet.push_to_hub("my-controlnet-model", variant="fp16")
```

The [`~diffusers.utils.PushToHubMixin.push_to_hub`] function saves the model's `config.json` file, and the weights are automatically saved in the `safetensors` format.

Now you can reload the model from your repository on the Hub:

```py
model = ControlNetModel.from_pretrained("your-namespace/my-controlnet-model")
```

## Scheduler

To push a scheduler to the Hub, call [`~diffusers.utils.PushToHubMixin.push_to_hub`] and specify the repository id of the scheduler to be stored on the Hub:

```py
from diffusers import DDIMScheduler

scheduler = DDIMScheduler(
    beta_start=0.00085,
    beta_end=0.012,
    beta_schedule="scaled_linear",
    clip_sample=False,
    set_alpha_to_one=False,
)
scheduler.push_to_hub("my-controlnet-scheduler")
```

The [`~diffusers.utils.PushToHubMixin.push_to_hub`] function saves the scheduler's `scheduler_config.json` file to the specified repository.

Now you can reload the scheduler from your repository on the Hub:

```py
scheduler = DDIMScheduler.from_pretrained("your-namespace/my-controlnet-scheduler")
```

## Pipeline

You can also push an entire pipeline with all its components to the Hub.
For example, initialize the components of a [`StableDiffusionPipeline`] with the parameters you want: ```py from diffusers import ( UNet2DConditionModel, AutoencoderKL, DDIMScheduler, StableDiffusionPipeline, ) from transformers import CLIPTextModel, CLIPTextConfig, CLIPTokenizer unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, ) scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, ) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") ``` Pass all of the components to the [`StableDiffusionPipeline`] and call [`~diffusers.utils.PushToHubMixin.push_to_hub`] to push the pipeline to the Hub: ```py components = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, } pipeline = StableDiffusionPipeline(**components) pipeline.push_to_hub("my-pipeline") ``` The [`~diffusers.utils.PushToHubMixin.push_to_hub`] function saves each component to a subfolder in the repository. Now you can reload the pipeline from your repository on the Hub: ```py pipeline = StableDiffusionPipeline.from_pretrained("your-namespace/my-pipeline") ``` ## Privacy Set `private=True` in the [`~diffusers.utils.PushToHubMixin.push_to_hub`] function to keep your model, scheduler, or pipeline files private: ```py controlnet.push_to_hub("my-controlnet-model-private", private=True) ``` Private repositories are only visible to you, and other users won't be able to clone the repository and your repository won't appear in search results. Even if a user has the URL to your private repository, they'll receive a `404 - Sorry, we can't find the page you are looking for`. You must be [logged in](https://huggingface.co/docs/huggingface_hub/quick-start#login) to load a model from a private repository.
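If you need to load from a private repository in a non-interactive environment, a minimal sketch looks like this (the repository name is a placeholder, and passing `token` explicitly is optional if you are already logged in via `huggingface-cli login`):

```python
from diffusers import StableDiffusionPipeline

pipeline = StableDiffusionPipeline.from_pretrained(
    "your-namespace/my-pipeline",  # private repository (placeholder)
    token="hf_xxx",                # or omit and rely on a cached login
)
```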
diffusers/docs/source/en/using-diffusers/push_to_hub.md/0
{ "file_path": "diffusers/docs/source/en/using-diffusers/push_to_hub.md", "repo_id": "diffusers", "token_count": 2083 }
129
<!--Copyright 2025 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> <p align="center"> <br> <img src="https://raw.githubusercontent.com/huggingface/diffusers/77aadfee6a891ab9fcfb780f87c693f7a5beeb8e/docs/source/imgs/diffusers_library.jpg" width="400"/> <br> </p> # Diffusers 🤗 Diffusers は、画像や音声、さらには分子の3D構造を生成するための、最先端の事前学習済みDiffusion Model(拡散モデル)を提供するライブラリです。シンプルな生成ソリューションをお探しの場合でも、独自の拡散モデルをトレーニングしたい場合でも、🤗 Diffusers はその両方をサポートするモジュール式のツールボックスです。私たちのライブラリは、[性能より使いやすさ](conceptual/philosophy#usability-over-performance)、[簡単よりシンプル](conceptual/philosophy#simple-over-easy)、[抽象化よりカスタマイズ性](conceptual/philosophy#tweakable-contributorfriendly-over-abstraction)に重点を置いて設計されています。 このライブラリには3つの主要コンポーネントがあります: - 数行のコードで推論可能な最先端の[拡散パイプライン](api/pipelines/overview)。Diffusersには多くのパイプラインがあります。利用可能なパイプラインを網羅したリストと、それらが解決するタスクについては、パイプラインの[概要](https://huggingface.co/docs/diffusers/api/pipelines/overview)の表をご覧ください。 - 生成速度と品質のトレードオフのバランスを取る交換可能な[ノイズスケジューラ](api/schedulers/overview) - ビルディングブロックとして使用することができ、スケジューラと組み合わせることで、エンドツーエンドの拡散モデルを構築可能な事前学習済み[モデル](api/models) <div class="mt-10"> <div class="w-full flex flex-col space-y-4 md:space-y-0 md:grid md:grid-cols-2 md:gap-y-4 md:gap-x-5"> <a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="./tutorials/tutorial_overview" ><div class="w-full text-center bg-gradient-to-br from-blue-400 to-blue-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed">チュートリアル</div> <p class="text-gray-700">出力の生成、独自の拡散システムの構築、拡散モデルのトレーニングを開始するために必要な基本的なスキルを学ぶことができます。初めて 🤗Diffusersを使用する場合は、ここから始めることをおすすめします!</p> </a> <a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="./using-diffusers/loading_overview" ><div class="w-full text-center bg-gradient-to-br from-indigo-400 to-indigo-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed">ガイド</div> <p class="text-gray-700">パイプライン、モデル、スケジューラの読み込みに役立つ実践的なガイドです。また、特定のタスクにパイプラインを使用する方法、出力の生成方法を制御する方法、生成速度を最適化する方法、さまざまなトレーニング手法についても学ぶことができます。</p> </a> <a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="./conceptual/philosophy" ><div class="w-full text-center bg-gradient-to-br from-pink-400 to-pink-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed">Conceptual guides</div> <p class="text-gray-700">ライブラリがなぜこのように設計されたのかを理解し、ライブラリを利用する際の倫理的ガイドラインや安全対策について詳しく学べます。</p> </a> <a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="./api/models/overview" ><div class="w-full text-center bg-gradient-to-br from-purple-400 to-purple-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed">リファレンス</div> <p class="text-gray-700">🤗 Diffusersのクラスとメソッドがどのように機能するかについての技術的な説明です。</p> </a> </div> </div>
diffusers/docs/source/ja/index.md/0
{ "file_path": "diffusers/docs/source/ja/index.md", "repo_id": "diffusers", "token_count": 2031 }
130
<!--Copyright 2025 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # 메모리와 속도 메모리 또는 속도에 대해 🤗 Diffusers *추론*을 최적화하기 위한 몇 가지 기술과 아이디어를 제시합니다. 일반적으로, memory-efficient attention을 위해 [xFormers](https://github.com/facebookresearch/xformers) 사용을 추천하기 때문에, 추천하는 [설치 방법](xformers)을 보고 설치해 보세요. 다음 설정이 성능과 메모리에 미치는 영향에 대해 설명합니다. | | 지연시간 | 속도 향상 | | ---------------- | ------- | ------- | | 별도 설정 없음 | 9.50s | x1 | | cuDNN auto-tuner | 9.37s | x1.01 | | fp16 | 3.61s | x2.63 | | Channels Last 메모리 형식 | 3.30s | x2.88 | | traced UNet | 3.21s | x2.96 | | memory-efficient attention | 2.63s | x3.61 | <em> NVIDIA TITAN RTX에서 50 DDIM 스텝의 "a photo of an astronaut riding a horse on mars" 프롬프트로 512x512 크기의 단일 이미지를 생성하였습니다. </em> ## cuDNN auto-tuner 활성화하기 [NVIDIA cuDNN](https://developer.nvidia.com/cudnn)은 컨볼루션을 계산하는 많은 알고리즘을 지원합니다. Autotuner는 짧은 벤치마크를 실행하고 주어진 입력 크기에 대해 주어진 하드웨어에서 최고의 성능을 가진 커널을 선택합니다. **컨볼루션 네트워크**를 활용하고 있기 때문에 (다른 유형들은 현재 지원되지 않음), 다음 설정을 통해 추론 전에 cuDNN autotuner를 활성화할 수 있습니다: ```python import torch torch.backends.cudnn.benchmark = True ``` ### fp32 대신 tf32 사용하기 (Ampere 및 이후 CUDA 장치들에서) Ampere 및 이후 CUDA 장치에서 행렬곱 및 컨볼루션은 TensorFloat32(TF32) 모드를 사용하여 더 빠르지만 약간 덜 정확할 수 있습니다. 기본적으로 PyTorch는 컨볼루션에 대해 TF32 모드를 활성화하지만 행렬 곱셈은 활성화하지 않습니다. 네트워크에 완전한 float32 정밀도가 필요한 경우가 아니면 행렬 곱셈에 대해서도 이 설정을 활성화하는 것이 좋습니다. 이는 일반적으로 무시할 수 있는 수치의 정확도 손실이 있지만, 계산 속도를 크게 높일 수 있습니다. 그것에 대해 [여기](https://huggingface.co/docs/transformers/v4.18.0/en/performance#tf32)서 더 읽을 수 있습니다. 추론하기 전에 다음을 추가하기만 하면 됩니다: ```python import torch torch.backends.cuda.matmul.allow_tf32 = True ``` ## 반정밀도 가중치 더 많은 GPU 메모리를 절약하고 더 빠른 속도를 얻기 위해 모델 가중치를 반정밀도(half precision)로 직접 불러오고 실행할 수 있습니다. 여기에는 `fp16`이라는 브랜치에 저장된 float16 버전의 가중치를 불러오고, 그 때 `float16` 유형을 사용하도록 PyTorch에 지시하는 작업이 포함됩니다. ```Python pipe = StableDiffusionPipeline.from_pretrained( "stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16, ) pipe = pipe.to("cuda") prompt = "a photo of an astronaut riding a horse on mars" image = pipe(prompt).images[0] ``` <Tip warning={true}> 어떤 파이프라인에서도 [`torch.autocast`](https://pytorch.org/docs/stable/amp.html#torch.autocast) 를 사용하는 것은 검은색 이미지를 생성할 수 있고, 순수한 float16 정밀도를 사용하는 것보다 항상 느리기 때문에 사용하지 않는 것이 좋습니다. </Tip> ## 추가 메모리 절약을 위한 슬라이스 어텐션 추가 메모리 절약을 위해, 한 번에 모두 계산하는 대신 단계적으로 계산을 수행하는 슬라이스 버전의 어텐션(attention)을 사용할 수 있습니다. <Tip> Attention slicing은 모델이 하나 이상의 어텐션 헤드를 사용하는 한, 배치 크기가 1인 경우에도 유용합니다. 하나 이상의 어텐션 헤드가 있는 경우 *QK^T* 어텐션 매트릭스는 상당한 양의 메모리를 절약할 수 있는 각 헤드에 대해 순차적으로 계산될 수 있습니다. </Tip> 각 헤드에 대해 순차적으로 어텐션 계산을 수행하려면, 다음과 같이 추론 전에 파이프라인에서 [`~StableDiffusionPipeline.enable_attention_slicing`]를 호출하면 됩니다: ```Python import torch from diffusers import StableDiffusionPipeline pipe = StableDiffusionPipeline.from_pretrained( "stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16, ) pipe = pipe.to("cuda") prompt = "a photo of an astronaut riding a horse on mars" pipe.enable_attention_slicing() image = pipe(prompt).images[0] ``` 추론 시간이 약 10% 느려지는 약간의 성능 저하가 있지만 이 방법을 사용하면 3.2GB 정도의 작은 VRAM으로도 Stable Diffusion을 사용할 수 있습니다! 
## 더 큰 배치를 위한 sliced VAE 디코드 제한된 VRAM에서 대규모 이미지 배치를 디코딩하거나 32개 이상의 이미지가 포함된 배치를 활성화하기 위해, 배치의 latent 이미지를 한 번에 하나씩 디코딩하는 슬라이스 VAE 디코드를 사용할 수 있습니다. 이를 [`~StableDiffusionPipeline.enable_attention_slicing`] 또는 [`~StableDiffusionPipeline.enable_xformers_memory_efficient_attention`]과 결합하여 메모리 사용을 추가로 최소화할 수 있습니다. VAE 디코드를 한 번에 하나씩 수행하려면 추론 전에 파이프라인에서 [`~StableDiffusionPipeline.enable_vae_slicing`]을 호출합니다. 예를 들어: ```Python import torch from diffusers import StableDiffusionPipeline pipe = StableDiffusionPipeline.from_pretrained( "stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16, ) pipe = pipe.to("cuda") prompt = "a photo of an astronaut riding a horse on mars" pipe.enable_vae_slicing() images = pipe([prompt] * 32).images ``` 다중 이미지 배치에서 VAE 디코드가 약간의 성능 향상이 이루어집니다. 단일 이미지 배치에서는 성능 영향은 없습니다. <a name="sequential_offloading"></a> ## 메모리 절약을 위해 가속 기능을 사용하여 CPU로 오프로딩 추가 메모리 절약을 위해 가중치를 CPU로 오프로드하고 순방향 전달을 수행할 때만 GPU로 로드할 수 있습니다. CPU 오프로딩을 수행하려면 [`~StableDiffusionPipeline.enable_sequential_cpu_offload`]를 호출하기만 하면 됩니다: ```Python import torch from diffusers import StableDiffusionPipeline pipe = StableDiffusionPipeline.from_pretrained( "stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16, ) prompt = "a photo of an astronaut riding a horse on mars" pipe.enable_sequential_cpu_offload() image = pipe(prompt).images[0] ``` 그러면 메모리 소비를 3GB 미만으로 줄일 수 있습니다. 참고로 이 방법은 전체 모델이 아닌 서브모듈 수준에서 작동합니다. 이는 메모리 소비를 최소화하는 가장 좋은 방법이지만 프로세스의 반복적 특성으로 인해 추론 속도가 훨씬 느립니다. 파이프라인의 UNet 구성 요소는 여러 번 실행됩니다('num_inference_steps' 만큼). 매번 UNet의 서로 다른 서브모듈이 순차적으로 온로드된 다음 필요에 따라 오프로드되므로 메모리 이동 횟수가 많습니다. <Tip> 또 다른 최적화 방법인 <a href="#model_offloading">모델 오프로딩</a>을 사용하는 것을 고려하십시오. 이는 훨씬 빠르지만 메모리 절약이 크지는 않습니다. </Tip> 또한 ttention slicing과 연결해서 최소 메모리(< 2GB)로도 동작할 수 있습니다. ```Python import torch from diffusers import StableDiffusionPipeline pipe = StableDiffusionPipeline.from_pretrained( "stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16, ) prompt = "a photo of an astronaut riding a horse on mars" pipe.enable_sequential_cpu_offload() pipe.enable_attention_slicing(1) image = pipe(prompt).images[0] ``` **참고**: 'enable_sequential_cpu_offload()'를 사용할 때, 미리 파이프라인을 CUDA로 이동하지 **않는** 것이 중요합니다.그렇지 않으면 메모리 소비의 이득이 최소화됩니다. 더 많은 정보를 위해 [이 이슈](https://github.com/huggingface/diffusers/issues/1934)를 보세요. <a name="model_offloading"></a> ## 빠른 추론과 메모리 메모리 절약을 위한 모델 오프로딩 [순차적 CPU 오프로딩](#sequential_offloading)은 이전 섹션에서 설명한 것처럼 많은 메모리를 보존하지만 필요에 따라 서브모듈을 GPU로 이동하고 새 모듈이 실행될 때 즉시 CPU로 반환되기 때문에 추론 속도가 느려집니다. 전체 모델 오프로딩은 각 모델의 구성 요소인 _modules_을 처리하는 대신, 전체 모델을 GPU로 이동하는 대안입니다. 이로 인해 추론 시간에 미치는 영향은 미미하지만(파이프라인을 'cuda'로 이동하는 것과 비교하여) 여전히 약간의 메모리를 절약할 수 있습니다. 이 시나리오에서는 파이프라인의 주요 구성 요소 중 하나만(일반적으로 텍스트 인코더, unet 및 vae) GPU에 있고, 나머지는 CPU에서 대기할 것입니다. 여러 반복을 위해 실행되는 UNet과 같은 구성 요소는 더 이상 필요하지 않을 때까지 GPU에 남아 있습니다. 이 기능은 아래와 같이 파이프라인에서 `enable_model_cpu_offload()`를 호출하여 활성화할 수 있습니다. ```Python import torch from diffusers import StableDiffusionPipeline pipe = StableDiffusionPipeline.from_pretrained( "stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16, ) prompt = "a photo of an astronaut riding a horse on mars" pipe.enable_model_cpu_offload() image = pipe(prompt).images[0] ``` 이는 추가적인 메모리 절약을 위한 attention slicing과도 호환됩니다. 
```Python import torch from diffusers import StableDiffusionPipeline pipe = StableDiffusionPipeline.from_pretrained( "stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16, ) prompt = "a photo of an astronaut riding a horse on mars" pipe.enable_model_cpu_offload() pipe.enable_attention_slicing(1) image = pipe(prompt).images[0] ``` <Tip> 이 기능을 사용하려면 'accelerate' 버전 0.17.0 이상이 필요합니다. </Tip> ## Channels Last 메모리 형식 사용하기 Channels Last 메모리 형식은 차원 순서를 보존하는 메모리에서 NCHW 텐서 배열을 대체하는 방법입니다. Channels Last 텐서는 채널이 가장 조밀한 차원이 되는 방식으로 정렬됩니다(일명 픽셀당 이미지를 저장). 현재 모든 연산자 Channels Last 형식을 지원하는 것은 아니라 성능이 저하될 수 있으므로, 사용해보고 모델에 잘 작동하는지 확인하는 것이 좋습니다. 예를 들어 파이프라인의 UNet 모델이 channels Last 형식을 사용하도록 설정하려면 다음을 사용할 수 있습니다: ```python print(pipe.unet.conv_out.state_dict()["weight"].stride()) # (2880, 9, 3, 1) pipe.unet.to(memory_format=torch.channels_last) # in-place 연산 # 2번째 차원에서 스트라이드 1을 가지는 (2880, 1, 960, 320)로, 연산이 작동함을 증명합니다. print(pipe.unet.conv_out.state_dict()["weight"].stride()) ``` ## 추적(tracing) 추적은 모델을 통해 예제 입력 텐서를 통해 실행되는데, 해당 입력이 모델의 레이어를 통과할 때 호출되는 작업을 캡처하여 실행 파일 또는 'ScriptFunction'이 반환되도록 하고, 이는 just-in-time 컴파일로 최적화됩니다. UNet 모델을 추적하기 위해 다음을 사용할 수 있습니다: ```python import time import torch from diffusers import StableDiffusionPipeline import functools # torch 기울기 비활성화 torch.set_grad_enabled(False) # 변수 설정 n_experiments = 2 unet_runs_per_experiment = 50 # 입력 불러오기 def generate_inputs(): sample = torch.randn((2, 4, 64, 64), device="cuda", dtype=torch.float16) timestep = torch.rand(1, device="cuda", dtype=torch.float16) * 999 encoder_hidden_states = torch.randn((2, 77, 768), device="cuda", dtype=torch.float16) return sample, timestep, encoder_hidden_states pipe = StableDiffusionPipeline.from_pretrained( "stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16, ).to("cuda") unet = pipe.unet unet.eval() unet.to(memory_format=torch.channels_last) # Channels Last 메모리 형식 사용 unet.forward = functools.partial(unet.forward, return_dict=False) # return_dict=False을 기본값으로 설정 # 워밍업 for _ in range(3): with torch.inference_mode(): inputs = generate_inputs() orig_output = unet(*inputs) # 추적 print("tracing..") unet_traced = torch.jit.trace(unet, inputs) unet_traced.eval() print("done tracing") # 워밍업 및 그래프 최적화 for _ in range(5): with torch.inference_mode(): inputs = generate_inputs() orig_output = unet_traced(*inputs) # 벤치마킹 with torch.inference_mode(): for _ in range(n_experiments): torch.cuda.synchronize() start_time = time.time() for _ in range(unet_runs_per_experiment): orig_output = unet_traced(*inputs) torch.cuda.synchronize() print(f"unet traced inference took {time.time() - start_time:.2f} seconds") for _ in range(n_experiments): torch.cuda.synchronize() start_time = time.time() for _ in range(unet_runs_per_experiment): orig_output = unet(*inputs) torch.cuda.synchronize() print(f"unet inference took {time.time() - start_time:.2f} seconds") # 모델 저장 unet_traced.save("unet_traced.pt") ``` 그 다음, 파이프라인의 `unet` 특성을 다음과 같이 추적된 모델로 바꿀 수 있습니다. 
```python from diffusers import StableDiffusionPipeline import torch from dataclasses import dataclass @dataclass class UNet2DConditionOutput: sample: torch.Tensor pipe = StableDiffusionPipeline.from_pretrained( "stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16, ).to("cuda") # jitted unet 사용 unet_traced = torch.jit.load("unet_traced.pt") # pipe.unet 삭제 class TracedUNet(torch.nn.Module): def __init__(self): super().__init__() self.in_channels = pipe.unet.config.in_channels self.device = pipe.unet.device def forward(self, latent_model_input, t, encoder_hidden_states): sample = unet_traced(latent_model_input, t, encoder_hidden_states)[0] return UNet2DConditionOutput(sample=sample) pipe.unet = TracedUNet() with torch.inference_mode(): image = pipe([prompt] * 1, num_inference_steps=50).images[0] ``` ## Memory-efficient attention 어텐션 블록의 대역폭을 최적화하는 최근 작업으로 GPU 메모리 사용량이 크게 향상되고 향상되었습니다. @tridao의 가장 최근의 플래시 어텐션: [code](https://github.com/HazyResearch/flash-attention), [paper](https://huggingface.co/papers/2205.14135). 배치 크기 1(프롬프트 1개)의 512x512 크기로 추론을 실행할 때 몇 가지 Nvidia GPU에서 얻은 속도 향상은 다음과 같습니다: | GPU | 기준 어텐션 FP16 | 메모리 효율적인 어텐션 FP16 | |------------------ |--------------------- |--------------------------------- | | NVIDIA Tesla T4 | 3.5it/s | 5.5it/s | | NVIDIA 3060 RTX | 4.6it/s | 7.8it/s | | NVIDIA A10G | 8.88it/s | 15.6it/s | | NVIDIA RTX A6000 | 11.7it/s | 21.09it/s | | NVIDIA TITAN RTX | 12.51it/s | 18.22it/s | | A100-SXM4-40GB | 18.6it/s | 29.it/s | | A100-SXM-80GB | 18.7it/s | 29.5it/s | 이를 활용하려면 다음을 만족해야 합니다: - PyTorch > 1.12 - Cuda 사용 가능 - [xformers 라이브러리를 설치함](xformers) ```python from diffusers import StableDiffusionPipeline import torch pipe = StableDiffusionPipeline.from_pretrained( "stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16, ).to("cuda") pipe.enable_xformers_memory_efficient_attention() with torch.inference_mode(): sample = pipe("a small cat") # 선택: 이를 비활성화 하기 위해 다음을 사용할 수 있습니다. # pipe.disable_xformers_memory_efficient_attention() ```
diffusers/docs/source/ko/optimization/fp16.md/0
{ "file_path": "diffusers/docs/source/ko/optimization/fp16.md", "repo_id": "diffusers", "token_count": 10834 }
131
<!--Copyright 2025 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # InstructPix2Pix [InstructPix2Pix](https://huggingface.co/papers/2211.09800)는 text-conditioned diffusion 모델이 한 이미지에 편집을 따를 수 있도록 파인튜닝하는 방법입니다. 이 방법을 사용하여 파인튜닝된 모델은 다음을 입력으로 사용합니다: <p align="center"> <img src="https://huggingface.co/datasets/diffusers/docs-images/resolve/main/evaluation_diffusion_models/edit-instruction.png" alt="instructpix2pix-inputs" width=600/> </p> 출력은 입력 이미지에 편집 지시가 반영된 "수정된" 이미지입니다: <p align="center"> <img src="https://huggingface.co/datasets/diffusers/docs-images/resolve/main/output-gs%407-igs%401-steps%4050.png" alt="instructpix2pix-output" width=600/> </p> `train_instruct_pix2pix.py` 스크립트([여기](https://github.com/huggingface/diffusers/blob/main/examples/instruct_pix2pix/train_instruct_pix2pix.py)에서 찾을 수 있습니다.)는 학습 절차를 설명하고 Stable Diffusion에 적용할 수 있는 방법을 보여줍니다. *** `train_instruct_pix2pix.py`는 [원래 구현](https://github.com/timothybrooks/instruct-pix2pix)에 충실하면서 InstructPix2Pix 학습 절차를 구현하고 있지만, [소규모 데이터셋](https://huggingface.co/datasets/fusing/instructpix2pix-1000-samples)에서만 테스트를 했습니다. 이는 최종 결과에 영향을 끼칠 수 있습니다. 더 나은 결과를 위해, 더 큰 데이터셋에서 더 길게 학습하는 것을 권장합니다. [여기](https://huggingface.co/datasets/timbrooks/instructpix2pix-clip-filtered)에서 InstructPix2Pix 학습을 위해 큰 데이터셋을 찾을 수 있습니다. *** ## PyTorch로 로컬에서 실행하기 ### 종속성(dependencies) 설치하기 이 스크립트를 실행하기 전에, 라이브러리의 학습 종속성을 설치하세요: **중요** 최신 버전의 예제 스크립트를 성공적으로 실행하기 위해, **원본으로부터 설치**하는 것과 예제 스크립트를 자주 업데이트하고 예제별 요구사항을 설치하기 때문에 최신 상태로 유지하는 것을 권장합니다. 이를 위해, 새로운 가상 환경에서 다음 스텝을 실행하세요: ```bash git clone https://github.com/huggingface/diffusers cd diffusers pip install -e . ``` cd 명령어로 예제 폴더로 이동하세요. ```bash cd examples/instruct_pix2pix ``` 이제 실행하세요. ```bash pip install -r requirements.txt ``` 그리고 [🤗Accelerate](https://github.com/huggingface/accelerate/) 환경에서 초기화하세요: ```bash accelerate config ``` 혹은 환경에 대한 질문 없이 기본적인 accelerate 구성을 사용하려면 다음을 실행하세요. ```bash accelerate config default ``` 혹은 사용 중인 환경이 notebook과 같은 대화형 쉘은 지원하지 않는 경우는 다음 절차를 따라주세요. ```python from accelerate.utils import write_basic_config write_basic_config() ``` ### 예시 이전에 언급했듯이, 학습을 위해 [작은 데이터셋](https://huggingface.co/datasets/fusing/instructpix2pix-1000-samples)을 사용할 것입니다. 그 데이터셋은 InstructPix2Pix 논문에서 사용된 [원래의 데이터셋](https://huggingface.co/datasets/timbrooks/instructpix2pix-clip-filtered)보다 작은 버전입니다. 자신의 데이터셋을 사용하기 위해, [학습을 위한 데이터셋 만들기](create_dataset) 가이드를 참고하세요. `MODEL_NAME` 환경 변수(허브 모델 레포지토리 또는 모델 가중치가 포함된 폴더 경로)를 지정하고 [`pretrained_model_name_or_path`](https://huggingface.co/docs/diffusers/en/api/diffusion_pipeline#diffusers.DiffusionPipeline.from_pretrained.pretrained_model_name_or_path) 인수에 전달합니다. `DATASET_ID`에 데이터셋 이름을 지정해야 합니다: ```bash export MODEL_NAME="stable-diffusion-v1-5/stable-diffusion-v1-5" export DATASET_ID="fusing/instructpix2pix-1000-samples" ``` 지금, 학습을 실행할 수 있습니다. 스크립트는 레포지토리의 하위 폴더의 모든 구성요소(`feature_extractor`, `scheduler`, `text_encoder`, `unet` 등)를 저장합니다. 
```bash accelerate launch --mixed_precision="fp16" train_instruct_pix2pix.py \ --pretrained_model_name_or_path=$MODEL_NAME \ --dataset_name=$DATASET_ID \ --enable_xformers_memory_efficient_attention \ --resolution=256 --random_flip \ --train_batch_size=4 --gradient_accumulation_steps=4 --gradient_checkpointing \ --max_train_steps=15000 \ --checkpointing_steps=5000 --checkpoints_total_limit=1 \ --learning_rate=5e-05 --max_grad_norm=1 --lr_warmup_steps=0 \ --conditioning_dropout_prob=0.05 \ --mixed_precision=fp16 \ --seed=42 \ --push_to_hub ``` 추가적으로, 가중치와 바이어스를 학습 과정에 모니터링하여 검증 추론을 수행하는 것을 지원합니다. `report_to="wandb"`와 이 기능을 사용할 수 있습니다: ```bash accelerate launch --mixed_precision="fp16" train_instruct_pix2pix.py \ --pretrained_model_name_or_path=$MODEL_NAME \ --dataset_name=$DATASET_ID \ --enable_xformers_memory_efficient_attention \ --resolution=256 --random_flip \ --train_batch_size=4 --gradient_accumulation_steps=4 --gradient_checkpointing \ --max_train_steps=15000 \ --checkpointing_steps=5000 --checkpoints_total_limit=1 \ --learning_rate=5e-05 --max_grad_norm=1 --lr_warmup_steps=0 \ --conditioning_dropout_prob=0.05 \ --mixed_precision=fp16 \ --val_image_url="https://hf.co/datasets/diffusers/diffusers-images-docs/resolve/main/mountain.png" \ --validation_prompt="make the mountains snowy" \ --seed=42 \ --report_to=wandb \ --push_to_hub ``` 모델 디버깅에 유용한 이 평가 방법 권장합니다. 이를 사용하기 위해 `wandb`를 설치하는 것을 주목해주세요. `pip install wandb`로 실행해 `wandb`를 설치할 수 있습니다. [여기](https://wandb.ai/sayakpaul/instruct-pix2pix/runs/ctr3kovq), 몇 가지 평가 방법과 학습 파라미터를 포함하는 예시를 볼 수 있습니다. ***참고: 원본 논문에서, 저자들은 256x256 이미지 해상도로 학습한 모델로 512x512와 같은 더 큰 해상도로 잘 일반화되는 것을 볼 수 있었습니다. 이는 학습에 사용한 큰 데이터셋을 사용했기 때문입니다.*** ## 다수의 GPU로 학습하기 `accelerate`는 원활한 다수의 GPU로 학습을 가능하게 합니다. `accelerate`로 분산 학습을 실행하는 [여기](https://huggingface.co/docs/accelerate/basic_tutorials/launch) 설명을 따라 해 주시기 바랍니다. 예시의 명령어 입니다: ```bash accelerate launch --mixed_precision="fp16" --multi_gpu train_instruct_pix2pix.py \ --pretrained_model_name_or_path=stable-diffusion-v1-5/stable-diffusion-v1-5 \ --dataset_name=sayakpaul/instructpix2pix-1000-samples \ --use_ema \ --enable_xformers_memory_efficient_attention \ --resolution=512 --random_flip \ --train_batch_size=4 --gradient_accumulation_steps=4 --gradient_checkpointing \ --max_train_steps=15000 \ --checkpointing_steps=5000 --checkpoints_total_limit=1 \ --learning_rate=5e-05 --lr_warmup_steps=0 \ --conditioning_dropout_prob=0.05 \ --mixed_precision=fp16 \ --seed=42 \ --push_to_hub ``` ## 추론하기 일단 학습이 완료되면, 추론 할 수 있습니다: ```python import PIL import requests import torch from diffusers import StableDiffusionInstructPix2PixPipeline model_id = "your_model_id" # <- 이를 수정하세요. 
pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda") generator = torch.Generator("cuda").manual_seed(0) url = "https://huggingface.co/datasets/sayakpaul/sample-datasets/resolve/main/test_pix2pix_4.png" def download_image(url): image = PIL.Image.open(requests.get(url, stream=True).raw) image = PIL.ImageOps.exif_transpose(image) image = image.convert("RGB") return image image = download_image(url) prompt = "wipe out the lake" num_inference_steps = 20 image_guidance_scale = 1.5 guidance_scale = 10 edited_image = pipe( prompt, image=image, num_inference_steps=num_inference_steps, image_guidance_scale=image_guidance_scale, guidance_scale=guidance_scale, generator=generator, ).images[0] edited_image.save("edited_image.png") ``` 학습 스크립트를 사용해 얻은 예시의 모델 레포지토리는 여기 [sayakpaul/instruct-pix2pix](https://huggingface.co/sayakpaul/instruct-pix2pix)에서 확인할 수 있습니다. 성능을 위한 속도와 품질을 제어하기 위해 세 가지 파라미터를 사용하는 것이 좋습니다: * `num_inference_steps` * `image_guidance_scale` * `guidance_scale` 특히, `image_guidance_scale`와 `guidance_scale`는 생성된("수정된") 이미지에서 큰 영향을 미칠 수 있습니다.([여기](https://twitter.com/RisingSayak/status/1628392199196151808?s=20)예시를 참고해주세요.) 만약 InstructPix2Pix 학습 방법을 사용해 몇 가지 흥미로운 방법을 찾고 있다면, 이 블로그 게시물[Instruction-tuning Stable Diffusion with InstructPix2Pix](https://huggingface.co/blog/instruction-tuning-sd)을 확인해주세요.
diffusers/docs/source/ko/training/instructpix2pix.md/0
{ "file_path": "diffusers/docs/source/ko/training/instructpix2pix.md", "repo_id": "diffusers", "token_count": 5663 }
132
<!--Copyright 2025 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # 파이프라인, 모델, 스케줄러 불러오기 기본적으로 diffusion 모델은 다양한 컴포넌트들(모델, 토크나이저, 스케줄러) 간의 복잡한 상호작용을 기반으로 동작합니다. 디퓨저스(Diffusers)는 이러한 diffusion 모델을 보다 쉽고 간편한 API로 제공하는 것을 목표로 설계되었습니다. [`DiffusionPipeline`]은 diffusion 모델이 갖는 복잡성을 하나의 파이프라인 API로 통합하고, 동시에 이를 구성하는 각각의 컴포넌트들을 태스크에 맞춰 유연하게 커스터마이징할 수 있도록 지원하고 있습니다. diffusion 모델의 훈련과 추론에 필요한 모든 것은 [`DiffusionPipeline.from_pretrained`] 메서드를 통해 접근할 수 있습니다. (이 말의 의미는 다음 단락에서 보다 자세하게 다뤄보도록 하겠습니다.) 이 문서에서는 설명할 내용은 다음과 같습니다. * 허브를 통해 혹은 로컬로 파이프라인을 불러오는 법 * 파이프라인에 다른 컴포넌트들을 적용하는 법 * 오리지널 체크포인트가 아닌 variant를 불러오는 법 (variant란 기본으로 설정된 `fp32`가 아닌 다른 부동 소수점 타입(예: `fp16`)을 사용하거나 Non-EMA 가중치를 사용하는 체크포인트들을 의미합니다.) * 모델과 스케줄러를 불러오는 법 ## Diffusion 파이프라인 <Tip> 💡 [`DiffusionPipeline`] 클래스가 동작하는 방식에 보다 자세한 내용이 궁금하다면, [DiffusionPipeline explained](#diffusionpipeline에-대해-알아보기) 섹션을 확인해보세요. </Tip> [`DiffusionPipeline`] 클래스는 diffusion 모델을 [허브](https://huggingface.co/models?library=diffusers)로부터 불러오는 가장 심플하면서 보편적인 방식입니다. [`DiffusionPipeline.from_pretrained`] 메서드는 적합한 파이프라인 클래스를 자동으로 탐지하고, 필요한 구성요소(configuration)와 가중치(weight) 파일들을 다운로드하고 캐싱한 다음, 해당 파이프라인 인스턴스를 반환합니다. ```python from diffusers import DiffusionPipeline repo_id = "stable-diffusion-v1-5/stable-diffusion-v1-5" pipe = DiffusionPipeline.from_pretrained(repo_id) ``` 물론 [`DiffusionPipeline`] 클래스를 사용하지 않고, 명시적으로 직접 해당 파이프라인 클래스를 불러오는 것도 가능합니다. 아래 예시 코드는 위 예시와 동일한 인스턴스를 반환합니다. ```python from diffusers import StableDiffusionPipeline repo_id = "stable-diffusion-v1-5/stable-diffusion-v1-5" pipe = StableDiffusionPipeline.from_pretrained(repo_id) ``` [CompVis/stable-diffusion-v1-4](https://huggingface.co/CompVis/stable-diffusion-v1-4)이나 [stable-diffusion-v1-5/stable-diffusion-v1-5](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5) 같은 체크포인트들의 경우, 하나 이상의 다양한 태스크에 활용될 수 있습니다. (예를 들어 위의 두 체크포인트의 경우, text-to-image와 image-to-image에 모두 활용될 수 있습니다.) 만약 이러한 체크포인트들을 기본 설정 태스크가 아닌 다른 태스크에 활용하고자 한다면, 해당 태스크에 대응되는 파이프라인(task-specific pipeline)을 사용해야 합니다. ```python from diffusers import StableDiffusionImg2ImgPipeline repo_id = "stable-diffusion-v1-5/stable-diffusion-v1-5" pipe = StableDiffusionImg2ImgPipeline.from_pretrained(repo_id) ``` ### 로컬 파이프라인 파이프라인을 로컬로 불러오고자 한다면, `git-lfs`를 사용하여 직접 체크포인트를 로컬 디스크에 다운로드 받아야 합니다. 아래의 명령어를 실행하면 `./stable-diffusion-v1-5`란 이름으로 폴더가 로컬디스크에 생성됩니다. ```bash git lfs install git clone https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5 ``` 그런 다음 해당 로컬 경로를 [`~DiffusionPipeline.from_pretrained`] 메서드에 전달합니다. ```python from diffusers import DiffusionPipeline repo_id = "./stable-diffusion-v1-5" stable_diffusion = DiffusionPipeline.from_pretrained(repo_id) ``` 위의 예시코드처럼 만약 `repo_id`가 로컬 패스(local path)라면, [`~DiffusionPipeline.from_pretrained`] 메서드는 이를 자동으로 감지하여 허브에서 파일을 다운로드하지 않습니다. 만약 로컬 디스크에 저장된 파이프라인 체크포인트가 최신 버전이 아닐 경우에도, 최신 버전을 다운로드하지 않고 기존 로컬 디스크에 저장된 체크포인트를 사용한다는 것을 의미합니다. ### 파이프라인 내부의 컴포넌트 교체하기 파이프라인 내부의 컴포넌트들은 호환 가능한 다른 컴포넌트로 교체될 수 있습니다. 이와 같은 컴포넌트 교체가 중요한 이유는 다음과 같습니다. 
- 어떤 스케줄러를 사용할 것인가는 생성속도와 생성품질 간의 트레이드오프를 정의하는 중요한 요소입니다. - diffusion 모델 내부의 컴포넌트들은 일반적으로 각각 독립적으로 훈련되기 때문에, 더 좋은 성능을 보여주는 컴포넌트가 있다면 그걸로 교체하는 식으로 성능을 향상시킬 수 있습니다. - 파인 튜닝 단계에서는 일반적으로 UNet 혹은 텍스트 인코더와 같은 일부 컴포넌트들만 훈련하게 됩니다. 어떤 스케줄러들이 호환가능한지는 `compatibles` 속성을 통해 확인할 수 있습니다. ```python from diffusers import DiffusionPipeline repo_id = "stable-diffusion-v1-5/stable-diffusion-v1-5" stable_diffusion = DiffusionPipeline.from_pretrained(repo_id) stable_diffusion.scheduler.compatibles ``` 이번에는 [`SchedulerMixin.from_pretrained`] 메서드를 사용해서, 기존 기본 스케줄러였던 [`PNDMScheduler`]를 보다 우수한 성능의 [`EulerDiscreteScheduler`]로 바꿔봅시다. 스케줄러를 로드할 때는 `subfolder` 인자를 통해, 해당 파이프라인의 리포지토리에서 [스케줄러에 관한 하위폴더](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5/tree/main/scheduler)를 명시해주어야 합니다. 그 다음 새롭게 생성한 [`EulerDiscreteScheduler`] 인스턴스를 [`DiffusionPipeline`]의 `scheduler` 인자에 전달합니다. ```python from diffusers import DiffusionPipeline, EulerDiscreteScheduler, DPMSolverMultistepScheduler repo_id = "stable-diffusion-v1-5/stable-diffusion-v1-5" scheduler = EulerDiscreteScheduler.from_pretrained(repo_id, subfolder="scheduler") stable_diffusion = DiffusionPipeline.from_pretrained(repo_id, scheduler=scheduler) ``` ### 세이프티 체커 스테이블 diffusion과 같은 diffusion 모델들은 유해한 이미지를 생성할 수도 있습니다. 이를 예방하기 위해 디퓨저스는 생성된 이미지의 유해성을 판단하는 [세이프티 체커(safety checker)](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/safety_checker.py) 기능을 지원하고 있습니다. 만약 세이프티 체커의 사용을 원하지 않는다면, `safety_checker` 인자에 `None`을 전달해주시면 됩니다. ```python from diffusers import DiffusionPipeline repo_id = "stable-diffusion-v1-5/stable-diffusion-v1-5" stable_diffusion = DiffusionPipeline.from_pretrained(repo_id, safety_checker=None) ``` ### 컴포넌트 재사용 복수의 파이프라인에 동일한 모델이 반복적으로 사용한다면, 굳이 해당 모델의 동일한 가중치를 중복으로 RAM에 불러올 필요는 없을 것입니다. [`~DiffusionPipeline.components`] 속성을 통해 파이프라인 내부의 컴포넌트들을 참조할 수 있는데, 이번 단락에서는 이를 통해 동일한 모델 가중치를 RAM에 중복으로 불러오는 것을 방지하는 법에 대해 알아보겠습니다. ```python from diffusers import StableDiffusionPipeline, StableDiffusionImg2ImgPipeline model_id = "stable-diffusion-v1-5/stable-diffusion-v1-5" stable_diffusion_txt2img = StableDiffusionPipeline.from_pretrained(model_id) components = stable_diffusion_txt2img.components ``` 그 다음 위 예시 코드에서 선언한 `components` 변수를 다른 파이프라인에 전달함으로써, 모델의 가중치를 중복으로 RAM에 로딩하지 않고, 동일한 컴포넌트를 재사용할 수 있습니다. ```python stable_diffusion_img2img = StableDiffusionImg2ImgPipeline(**components) ``` 물론 각각의 컴포넌트들을 따로 따로 파이프라인에 전달할 수도 있습니다. 예를 들어 `stable_diffusion_txt2img` 파이프라인 안의 컴포넌트들 가운데서 세이프티 체커(`safety_checker`)와 피쳐 익스트랙터(`feature_extractor`)를 제외한 컴포넌트들만 `stable_diffusion_img2img` 파이프라인에서 재사용하는 방식 역시 가능합니다. ```python from diffusers import StableDiffusionPipeline, StableDiffusionImg2ImgPipeline model_id = "stable-diffusion-v1-5/stable-diffusion-v1-5" stable_diffusion_txt2img = StableDiffusionPipeline.from_pretrained(model_id) stable_diffusion_img2img = StableDiffusionImg2ImgPipeline( vae=stable_diffusion_txt2img.vae, text_encoder=stable_diffusion_txt2img.text_encoder, tokenizer=stable_diffusion_txt2img.tokenizer, unet=stable_diffusion_txt2img.unet, scheduler=stable_diffusion_txt2img.scheduler, safety_checker=None, feature_extractor=None, requires_safety_checker=False, ) ``` ## Checkpoint variants Variant란 일반적으로 다음과 같은 체크포인트들을 의미합니다. - `torch.float16`과 같이 정밀도는 더 낮지만, 용량 역시 더 작은 부동소수점 타입의 가중치를 사용하는 체크포인트. *(다만 이와 같은 variant의 경우, 추가적인 훈련과 CPU환경에서의 구동이 불가능합니다.)* - Non-EMA 가중치를 사용하는 체크포인트. 
*(Non-EMA 가중치의 경우, 파인 튜닝 단계에서 사용하는 것이 권장되는데, 추론 단계에선 사용하지 않는 것이 권장됩니다.)* <Tip> 💡 모델 구조는 동일하지만 서로 다른 학습 환경에서 서로 다른 데이터셋으로 학습된 체크포인트들이 있을 경우, 해당 체크포인트들은 variant 단계가 아닌 리포지토리 단계에서 분리되어 관리되어야 합니다. (즉, 해당 체크포인트들은 서로 다른 리포지토리에서 따로 관리되어야 합니다. 예시: [`stable-diffusion-v1-4`], [`stable-diffusion-v1-5`]). </Tip> | **checkpoint type** | **weight name** | **argument for loading weights** | | ------------------- | ----------------------------------- | -------------------------------- | | original | diffusion_pytorch_model.bin | | | floating point | diffusion_pytorch_model.fp16.bin | `variant`, `torch_dtype` | | non-EMA | diffusion_pytorch_model.non_ema.bin | `variant` | variant를 로드할 때 2개의 중요한 argument가 있습니다. * `torch_dtype`은 불러올 체크포인트의 부동소수점을 정의합니다. 예를 들어 `torch_dtype=torch.float16`을 명시함으로써 가중치의 부동소수점 타입을 `fl16`으로 변환할 수 있습니다. (만약 따로 설정하지 않을 경우, 기본값으로 `fp32` 타입의 가중치가 로딩됩니다.) 또한 `variant` 인자를 명시하지 않은 채로 체크포인트를 불러온 다음, 해당 체크포인트를 `torch_dtype=torch.float16` 인자를 통해 `fp16` 타입으로 변환하는 것 역시 가능합니다. 이 경우 기본으로 설정된 `fp32` 가중치가 먼저 다운로드되고, 해당 가중치들을 불러온 다음 `fp16` 타입으로 변환하게 됩니다. * `variant` 인자는 리포지토리에서 어떤 variant를 불러올 것인가를 정의합니다. 가령 [`diffusers/stable-diffusion-variants`](https://huggingface.co/diffusers/stable-diffusion-variants/tree/main/unet) 리포지토리로부터 `non_ema` 체크포인트를 불러오고자 한다면, `variant="non_ema"` 인자를 전달해야 합니다. ```python from diffusers import DiffusionPipeline # load fp16 variant stable_diffusion = DiffusionPipeline.from_pretrained( "stable-diffusion-v1-5/stable-diffusion-v1-5", variant="fp16", torch_dtype=torch.float16 ) # load non_ema variant stable_diffusion = DiffusionPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5", variant="non_ema") ``` 다른 부동소수점 타입의 가중치 혹은 non-EMA 가중치를 사용하는 체크포인트를 저장하기 위해서는, [`DiffusionPipeline.save_pretrained`] 메서드를 사용해야 하며, 이 때 `variant` 인자를 명시해줘야 합니다. 원래의 체크포인트와 동일한 폴더에 variant를 저장해야 하며, 이렇게 하면 동일한 폴더에서 오리지널 체크포인트과 variant를 모두 불러올 수 있습니다. ```python from diffusers import DiffusionPipeline # save as fp16 variant stable_diffusion.save_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5", variant="fp16") # save as non-ema variant stable_diffusion.save_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5", variant="non_ema") ``` 만약 variant를 기존 폴더에 저장하지 않을 경우, `variant` 인자를 반드시 명시해야 합니다. 그렇게 하지 않을 경우 원래의 오리지널 체크포인트를 찾을 수 없게 되기 때문에 에러가 발생합니다. ```python # 👎 this won't work stable_diffusion = DiffusionPipeline.from_pretrained("./stable-diffusion-v1-5", torch_dtype=torch.float16) # 👍 this works stable_diffusion = DiffusionPipeline.from_pretrained( "./stable-diffusion-v1-5", variant="fp16", torch_dtype=torch.float16 ) ``` ### 모델 불러오기 모델들은 [`ModelMixin.from_pretrained`] 메서드를 통해 불러올 수 있습니다. 해당 메서드는 최신 버전의 모델 가중치 파일과 설정 파일(configurations)을 다운로드하고 캐싱합니다. 만약 이러한 파일들이 최신 버전으로 로컬 캐시에 저장되어 있다면, [`ModelMixin.from_pretrained`]는 굳이 해당 파일들을 다시 다운로드하지 않으며, 그저 캐시에 있는 최신 파일들을 재사용합니다. 모델은 `subfolder` 인자에 명시된 하위 폴더로부터 로드됩니다. 예를 들어 `stable-diffusion-v1-5/stable-diffusion-v1-5`의 UNet 모델의 가중치는 [`unet`](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5/tree/main/unet) 폴더에 저장되어 있습니다. ```python from diffusers import UNet2DConditionModel repo_id = "stable-diffusion-v1-5/stable-diffusion-v1-5" model = UNet2DConditionModel.from_pretrained(repo_id, subfolder="unet") ``` 혹은 [해당 모델의 리포지토리](https://huggingface.co/google/ddpm-cifar10-32/tree/main)로부터 다이렉트로 가져오는 것 역시 가능합니다. ```python from diffusers import UNet2DModel repo_id = "google/ddpm-cifar10-32" model = UNet2DModel.from_pretrained(repo_id) ``` 또한 앞서 봤던 `variant` 인자를 명시함으로써, Non-EMA나 `fp16`의 가중치를 가져오는 것 역시 가능합니다. 
```python from diffusers import UNet2DConditionModel model = UNet2DConditionModel.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5", subfolder="unet", variant="non-ema") model.save_pretrained("./local-unet", variant="non-ema") ``` ### 스케줄러 스케줄러들은 [`SchedulerMixin.from_pretrained`] 메서드를 통해 불러올 수 있습니다. 모델과 달리 스케줄러는 별도의 가중치를 갖지 않으며, 따라서 당연히 별도의 학습과정을 요구하지 않습니다. 이러한 스케줄러들은 (해당 스케줄러 하위폴더의) configration 파일을 통해 정의됩니다. 여러개의 스케줄러를 불러온다고 해서 많은 메모리를 소모하는 것은 아니며, 다양한 스케줄러들에 동일한 스케줄러 configration을 적용하는 것 역시 가능합니다. 다음 예시 코드에서 불러오는 스케줄러들은 모두 [`StableDiffusionPipeline`]과 호환되는데, 이는 곧 해당 스케줄러들에 동일한 스케줄러 configration 파일을 적용할 수 있음을 의미합니다. ```python from diffusers import StableDiffusionPipeline from diffusers import ( DDPMScheduler, DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler, EulerDiscreteScheduler, EulerAncestralDiscreteScheduler, DPMSolverMultistepScheduler, ) repo_id = "stable-diffusion-v1-5/stable-diffusion-v1-5" ddpm = DDPMScheduler.from_pretrained(repo_id, subfolder="scheduler") ddim = DDIMScheduler.from_pretrained(repo_id, subfolder="scheduler") pndm = PNDMScheduler.from_pretrained(repo_id, subfolder="scheduler") lms = LMSDiscreteScheduler.from_pretrained(repo_id, subfolder="scheduler") euler_anc = EulerAncestralDiscreteScheduler.from_pretrained(repo_id, subfolder="scheduler") euler = EulerDiscreteScheduler.from_pretrained(repo_id, subfolder="scheduler") dpm = DPMSolverMultistepScheduler.from_pretrained(repo_id, subfolder="scheduler") # replace `dpm` with any of `ddpm`, `ddim`, `pndm`, `lms`, `euler_anc`, `euler` pipeline = StableDiffusionPipeline.from_pretrained(repo_id, scheduler=dpm) ``` ### DiffusionPipeline에 대해 알아보기 클래스 메서드로서 [`DiffusionPipeline.from_pretrained`]은 2가지를 담당합니다. - 첫째로, `from_pretrained` 메서드는 최신 버전의 파이프라인을 다운로드하고, 캐시에 저장합니다. 이미 로컬 캐시에 최신 버전의 파이프라인이 저장되어 있다면, [`DiffusionPipeline.from_pretrained`]은 해당 파일들을 다시 다운로드하지 않고, 로컬 캐시에 저장되어 있는 파이프라인을 불러옵니다. - `model_index.json` 파일을 통해 체크포인트에 대응되는 적합한 파이프라인 클래스로 불러옵니다. 파이프라인의 폴더 구조는 해당 파이프라인 클래스의 구조와 직접적으로 일치합니다. 예를 들어 [`StableDiffusionPipeline`] 클래스는 [`stable-diffusion-v1-5/stable-diffusion-v1-5`](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5) 리포지토리와 대응되는 구조를 갖습니다. ```python from diffusers import DiffusionPipeline repo_id = "stable-diffusion-v1-5/stable-diffusion-v1-5" pipeline = DiffusionPipeline.from_pretrained(repo_id) print(pipeline) ``` 위의 코드 출력 결과를 확인해보면, `pipeline`은 [`StableDiffusionPipeline`]의 인스턴스이며, 다음과 같이 총 7개의 컴포넌트로 구성된다는 것을 알 수 있습니다. 
- `"feature_extractor"`: [`~transformers.CLIPImageProcessor`]의 인스턴스 - `"safety_checker"`: 유해한 컨텐츠를 스크리닝하기 위한 [컴포넌트](https://github.com/huggingface/diffusers/blob/e55687e1e15407f60f32242027b7bb8170e58266/src/diffusers/pipelines/stable_diffusion/safety_checker.py#L32) - `"scheduler"`: [`PNDMScheduler`]의 인스턴스 - `"text_encoder"`: [`~transformers.CLIPTextModel`]의 인스턴스 - `"tokenizer"`: a [`~transformers.CLIPTokenizer`]의 인스턴스 - `"unet"`: [`UNet2DConditionModel`]의 인스턴스 - `"vae"` [`AutoencoderKL`]의 인스턴스 ```json StableDiffusionPipeline { "feature_extractor": [ "transformers", "CLIPImageProcessor" ], "safety_checker": [ "stable_diffusion", "StableDiffusionSafetyChecker" ], "scheduler": [ "diffusers", "PNDMScheduler" ], "text_encoder": [ "transformers", "CLIPTextModel" ], "tokenizer": [ "transformers", "CLIPTokenizer" ], "unet": [ "diffusers", "UNet2DConditionModel" ], "vae": [ "diffusers", "AutoencoderKL" ] } ``` 파이프라인 인스턴스의 컴포넌트들을 [`stable-diffusion-v1-5/stable-diffusion-v1-5`](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5)의 폴더 구조와 비교해볼 경우, 각각의 컴포넌트마다 별도의 폴더가 있음을 확인할 수 있습니다. ``` . ├── feature_extractor │ └── preprocessor_config.json ├── model_index.json ├── safety_checker │ ├── config.json │ └── pytorch_model.bin ├── scheduler │ └── scheduler_config.json ├── text_encoder │ ├── config.json │ └── pytorch_model.bin ├── tokenizer │ ├── merges.txt │ ├── special_tokens_map.json │ ├── tokenizer_config.json │ └── vocab.json ├── unet │ ├── config.json │ ├── diffusion_pytorch_model.bin └── vae ├── config.json ├── diffusion_pytorch_model.bin ``` 또한 각각의 컴포넌트들을 파이프라인 인스턴스의 속성으로써 참조할 수 있습니다. ```py pipeline.tokenizer ``` ```python CLIPTokenizer( name_or_path="/root/.cache/huggingface/hub/models--runwayml--stable-diffusion-v1-5/snapshots/39593d5650112b4cc580433f6b0435385882d819/tokenizer", vocab_size=49408, model_max_length=77, is_fast=False, padding_side="right", truncation_side="right", special_tokens={ "bos_token": AddedToken("<|startoftext|>", rstrip=False, lstrip=False, single_word=False, normalized=True), "eos_token": AddedToken("<|endoftext|>", rstrip=False, lstrip=False, single_word=False, normalized=True), "unk_token": AddedToken("<|endoftext|>", rstrip=False, lstrip=False, single_word=False, normalized=True), "pad_token": "<|endoftext|>", }, ) ``` 모든 파이프라인은 `model_index.json` 파일을 통해 [`DiffusionPipeline`]에 다음과 같은 정보를 전달합니다. - `_class_name` 는 어떤 파이프라인 클래스를 사용해야 하는지에 대해 알려줍니다. - `_diffusers_version`는 어떤 버전의 디퓨저스로 파이프라인 안의 모델들이 만들어졌는지를 알려줍니다. - 그 다음은 각각의 컴포넌트들이 어떤 라이브러리의 어떤 클래스로 만들어졌는지에 대해 알려줍니다. (아래 예시에서 `"feature_extractor" : ["transformers", "CLIPImageProcessor"]`의 경우, `feature_extractor` 컴포넌트는 `transformers` 라이브러리의 `CLIPImageProcessor` 클래스를 통해 만들어졌다는 것을 의미합니다.) ```json { "_class_name": "StableDiffusionPipeline", "_diffusers_version": "0.6.0", "feature_extractor": [ "transformers", "CLIPImageProcessor" ], "safety_checker": [ "stable_diffusion", "StableDiffusionSafetyChecker" ], "scheduler": [ "diffusers", "PNDMScheduler" ], "text_encoder": [ "transformers", "CLIPTextModel" ], "tokenizer": [ "transformers", "CLIPTokenizer" ], "unet": [ "diffusers", "UNet2DConditionModel" ], "vae": [ "diffusers", "AutoencoderKL" ] } ```
diffusers/docs/source/ko/using-diffusers/loading.md/0
{ "file_path": "diffusers/docs/source/ko/using-diffusers/loading.md", "repo_id": "diffusers", "token_count": 14796 }
133
<!--Copyright 2025 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> [[open-in-colab]] # Tour rápido Modelos de difusão são treinados para remover o ruído Gaussiano aleatório passo a passo para gerar uma amostra de interesse, como uma imagem ou áudio. Isso despertou um tremendo interesse em IA generativa, e você provavelmente já viu exemplos de imagens geradas por difusão na internet. 🧨 Diffusers é uma biblioteca que visa tornar os modelos de difusão amplamente acessíveis a todos. Seja você um desenvolvedor ou um usuário, esse tour rápido irá introduzir você ao 🧨 Diffusers e ajudar você a começar a gerar rapidamente! Há três componentes principais da biblioteca para conhecer: - O [`DiffusionPipeline`] é uma classe de alto nível de ponta a ponta desenhada para gerar rapidamente amostras de modelos de difusão pré-treinados para inferência. - [Modelos](./api/models) pré-treinados populares e módulos que podem ser usados como blocos de construção para criar sistemas de difusão. - Vários [Agendadores](./api/schedulers/overview) diferentes - algoritmos que controlam como o ruído é adicionado para treinamento, e como gerar imagens sem o ruído durante a inferência. Esse tour rápido mostrará como usar o [`DiffusionPipeline`] para inferência, e então mostrará como combinar um modelo e um agendador para replicar o que está acontecendo dentro do [`DiffusionPipeline`]. <Tip> Esse tour rápido é uma versão simplificada da introdução 🧨 Diffusers [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/diffusers_intro.ipynb) para ajudar você a começar rápido. Se você quer aprender mais sobre o objetivo do 🧨 Diffusers, filosofia de design, e detalhes adicionais sobre a API principal, veja o notebook! </Tip> Antes de começar, certifique-se de ter todas as bibliotecas necessárias instaladas: ```py # uncomment to install the necessary libraries in Colab #!pip install --upgrade diffusers accelerate transformers ``` - [🤗 Accelerate](https://huggingface.co/docs/accelerate/index) acelera o carregamento do modelo para geração e treinamento. - [🤗 Transformers](https://huggingface.co/docs/transformers/index) é necessário para executar os modelos mais populares de difusão, como o [Stable Diffusion](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/overview). ## DiffusionPipeline O [`DiffusionPipeline`] é a forma mais fácil de usar um sistema de difusão pré-treinado para geração. É um sistema de ponta a ponta contendo o modelo e o agendador. Você pode usar o [`DiffusionPipeline`] pronto para muitas tarefas. Dê uma olhada na tabela abaixo para algumas tarefas suportadas, e para uma lista completa de tarefas suportadas, veja a tabela [Resumo do 🧨 Diffusers](./api/pipelines/overview#diffusers-summary). 
| **Tarefa** | **Descrição** | **Pipeline** | | -------------------------------------- | ------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | | Unconditional Image Generation | gera uma imagem a partir do ruído Gaussiano | [unconditional_image_generation](./using-diffusers/unconditional_image_generation) | | Text-Guided Image Generation | gera uma imagem a partir de um prompt de texto | [conditional_image_generation](./using-diffusers/conditional_image_generation) | | Text-Guided Image-to-Image Translation | adapta uma imagem guiada por um prompt de texto | [img2img](./using-diffusers/img2img) | | Text-Guided Image-Inpainting | preenche a parte da máscara da imagem, dado a imagem, a máscara e o prompt de texto | [inpaint](./using-diffusers/inpaint) | | Text-Guided Depth-to-Image Translation | adapta as partes de uma imagem guiada por um prompt de texto enquanto preserva a estrutura por estimativa de profundidade | [depth2img](./using-diffusers/depth2img) | Comece criando uma instância do [`DiffusionPipeline`] e especifique qual checkpoint do pipeline você gostaria de baixar. Você pode usar o [`DiffusionPipeline`] para qualquer [checkpoint](https://huggingface.co/models?library=diffusers&sort=downloads) armazenado no Hugging Face Hub. Nesse quicktour, você carregará o checkpoint [`stable-diffusion-v1-5`](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5) para geração de texto para imagem. <Tip warning={true}> Para os modelos de [Stable Diffusion](https://huggingface.co/CompVis/stable-diffusion), por favor leia cuidadosamente a [licença](https://huggingface.co/spaces/CompVis/stable-diffusion-license) primeiro antes de rodar o modelo. 🧨 Diffusers implementa uma verificação de segurança: [`safety_checker`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/safety_checker.py) para prevenir conteúdo ofensivo ou nocivo, mas as capacidades de geração de imagem aprimorada do modelo podem ainda produzir conteúdo potencialmente nocivo. </Tip> Para carregar o modelo com o método [`~DiffusionPipeline.from_pretrained`]: ```python >>> from diffusers import DiffusionPipeline >>> pipeline = DiffusionPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5", use_safetensors=True) ``` O [`DiffusionPipeline`] baixa e armazena em cache todos os componentes de modelagem, tokenização, e agendamento. Você verá que o pipeline do Stable Diffusion é composto pelo [`UNet2DConditionModel`] e [`PNDMScheduler`] entre outras coisas: ```py >>> pipeline StableDiffusionPipeline { "_class_name": "StableDiffusionPipeline", "_diffusers_version": "0.13.1", ..., "scheduler": [ "diffusers", "PNDMScheduler" ], ..., "unet": [ "diffusers", "UNet2DConditionModel" ], "vae": [ "diffusers", "AutoencoderKL" ] } ``` Nós fortemente recomendamos rodar o pipeline em uma placa de vídeo, pois o modelo consiste em aproximadamente 1.4 bilhões de parâmetros. Você pode mover o objeto gerador para uma placa de vídeo, assim como você faria no PyTorch: ```python >>> pipeline.to("cuda") ``` Agora você pode passar o prompt de texto para o `pipeline` para gerar uma imagem, e então acessar a imagem sem ruído. Por padrão, a saída da imagem é embrulhada em um objeto [`PIL.Image`](https://pillow.readthedocs.io/en/stable/reference/Image.html?highlight=image#the-image-class). 
```python >>> image = pipeline("An image of a squirrel in Picasso style").images[0] >>> image ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/image_of_squirrel_painting.png"/> </div> Salve a imagem chamando o `save`: ```python >>> image.save("image_of_squirrel_painting.png") ``` ### Pipeline local Você também pode utilizar o pipeline localmente. A única diferença é que você precisa baixar os pesos primeiro: ```bash !git lfs install !git clone https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5 ``` Assim carregue os pesos salvos no pipeline: ```python >>> pipeline = DiffusionPipeline.from_pretrained("./stable-diffusion-v1-5", use_safetensors=True) ``` Agora você pode rodar o pipeline como você faria na seção acima. ### Troca dos agendadores Agendadores diferentes tem diferentes velocidades de retirar o ruído e compensações de qualidade. A melhor forma de descobrir qual funciona melhor para você é testar eles! Uma das principais características do 🧨 Diffusers é permitir que você troque facilmente entre agendadores. Por exemplo, para substituir o [`PNDMScheduler`] padrão com o [`EulerDiscreteScheduler`], carregue ele com o método [`~diffusers.ConfigMixin.from_config`]: ```py >>> from diffusers import EulerDiscreteScheduler >>> pipeline = DiffusionPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5", use_safetensors=True) >>> pipeline.scheduler = EulerDiscreteScheduler.from_config(pipeline.scheduler.config) ``` Tente gerar uma imagem com o novo agendador e veja se você nota alguma diferença! Na próxima seção, você irá dar uma olhada mais de perto nos componentes - o modelo e o agendador - que compõe o [`DiffusionPipeline`] e aprender como usar esses componentes para gerar uma imagem de um gato. ## Modelos A maioria dos modelos recebe uma amostra de ruído, e em cada _timestep_ ele prevê o _noise residual_ (outros modelos aprendem a prever a amostra anterior diretamente ou a velocidade ou [`v-prediction`](https://github.com/huggingface/diffusers/blob/5e5ce13e2f89ac45a0066cb3f369462a3cf1d9ef/src/diffusers/schedulers/scheduling_ddim.py#L110)), a diferença entre uma imagem menos com ruído e a imagem de entrada. Você pode misturar e combinar modelos para criar outros sistemas de difusão. Modelos são inicializados com o método [`~ModelMixin.from_pretrained`] que também armazena em cache localmente os pesos do modelo para que seja mais rápido na próxima vez que você carregar o modelo. Para o tour rápido, você irá carregar o [`UNet2DModel`], um modelo básico de geração de imagem incondicional com um checkpoint treinado em imagens de gato: ```py >>> from diffusers import UNet2DModel >>> repo_id = "google/ddpm-cat-256" >>> model = UNet2DModel.from_pretrained(repo_id, use_safetensors=True) ``` Para acessar os parâmetros do modelo, chame `model.config`: ```py >>> model.config ``` A configuração do modelo é um dicionário 🧊 congelado 🧊, o que significa que esses parâmetros não podem ser mudados depois que o modelo é criado. Isso é intencional e garante que os parâmetros usados para definir a arquitetura do modelo no início permaneçam os mesmos, enquanto outros parâmetros ainda podem ser ajustados durante a geração. Um dos parâmetros mais importantes são: - `sample_size`: a dimensão da altura e largura da amostra de entrada. - `in_channels`: o número de canais de entrada da amostra de entrada. 
- `down_block_types` e `up_block_types`: o tipo de blocos de downsampling e upsampling usados para criar a arquitetura UNet. - `block_out_channels`: o número de canais de saída dos blocos de downsampling; também utilizado como uma order reversa do número de canais de entrada dos blocos de upsampling. - `layers_per_block`: o número de blocks ResNet presentes em cada block UNet. Para usar o modelo para geração, crie a forma da imagem com ruído Gaussiano aleatório. Deve ter um eixo `batch` porque o modelo pode receber múltiplos ruídos aleatórios, um eixo `channel` correspondente ao número de canais de entrada, e um eixo `sample_size` para a altura e largura da imagem: ```py >>> import torch >>> torch.manual_seed(0) >>> noisy_sample = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size) >>> noisy_sample.shape torch.Size([1, 3, 256, 256]) ``` Para geração, passe a imagem com ruído para o modelo e um `timestep`. O `timestep` indica o quão ruidosa a imagem de entrada é, com mais ruído no início e menos no final. Isso ajuda o modelo a determinar sua posição no processo de difusão, se está mais perto do início ou do final. Use o método `sample` para obter a saída do modelo: ```py >>> with torch.no_grad(): ... noisy_residual = model(sample=noisy_sample, timestep=2).sample ``` Para geração de exemplos reais, você precisará de um agendador para guiar o processo de retirada do ruído. Na próxima seção, você irá aprender como acoplar um modelo com um agendador. ## Agendadores Agendadores gerenciam a retirada do ruído de uma amostra ruidosa para uma amostra menos ruidosa dado a saída do modelo - nesse caso, é o `noisy_residual`. <Tip> 🧨 Diffusers é uma caixa de ferramentas para construir sistemas de difusão. Enquanto o [`DiffusionPipeline`] é uma forma conveniente de começar com um sistema de difusão pré-construído, você também pode escolher seus próprios modelos e agendadores separadamente para construir um sistema de difusão personalizado. </Tip> Para o tour rápido, você irá instanciar o [`DDPMScheduler`] com o método [`~diffusers.ConfigMixin.from_config`]: ```py >>> from diffusers import DDPMScheduler >>> scheduler = DDPMScheduler.from_config(repo_id) >>> scheduler DDPMScheduler { "_class_name": "DDPMScheduler", "_diffusers_version": "0.13.1", "beta_end": 0.02, "beta_schedule": "linear", "beta_start": 0.0001, "clip_sample": true, "clip_sample_range": 1.0, "num_train_timesteps": 1000, "prediction_type": "epsilon", "trained_betas": null, "variance_type": "fixed_small" } ``` <Tip> 💡 Perceba como o agendador é instanciado de uma configuração. Diferentemente de um modelo, um agendador não tem pesos treináveis e é livre de parâmetros! </Tip> Um dos parâmetros mais importante são: - `num_train_timesteps`: o tamanho do processo de retirar ruído ou em outras palavras, o número de _timesteps_ necessários para o processo de ruídos Gausianos aleatórios dentro de uma amostra de dados. - `beta_schedule`: o tipo de agendados de ruído para o uso de geração e treinamento. - `beta_start` e `beta_end`: para começar e terminar os valores de ruído para o agendador de ruído. Para predizer uma imagem com um pouco menos de ruído, passe o seguinte para o método do agendador [`~diffusers.DDPMScheduler.step`]: saída do modelo, `timestep`, e a atual `amostra`. 
```py >>> less_noisy_sample = scheduler.step(model_output=noisy_residual, timestep=2, sample=noisy_sample).prev_sample >>> less_noisy_sample.shape ``` O `less_noisy_sample` pode ser passado para o próximo `timestep` onde ele ficará ainda com menos ruído! Vamos juntar tudo agora e visualizar o processo inteiro de retirada de ruído. Comece, criando a função que faça o pós-processamento e mostre a imagem sem ruído como uma `PIL.Image`: ```py >>> import PIL.Image >>> import numpy as np >>> def display_sample(sample, i): ... image_processed = sample.cpu().permute(0, 2, 3, 1) ... image_processed = (image_processed + 1.0) * 127.5 ... image_processed = image_processed.numpy().astype(np.uint8) ... image_pil = PIL.Image.fromarray(image_processed[0]) ... display(f"Image at step {i}") ... display(image_pil) ``` Para acelerar o processo de retirada de ruído, mova a entrada e o modelo para uma GPU: ```py >>> model.to("cuda") >>> noisy_sample = noisy_sample.to("cuda") ``` Agora, crie um loop de retirada de ruído que prediz o residual da amostra menos ruidosa, e computa a amostra menos ruidosa com o agendador: ```py >>> import tqdm >>> sample = noisy_sample >>> for i, t in enumerate(tqdm.tqdm(scheduler.timesteps)): ... # 1. predict noise residual ... with torch.no_grad(): ... residual = model(sample, t).sample ... # 2. compute less noisy image and set x_t -> x_t-1 ... sample = scheduler.step(residual, t, sample).prev_sample ... # 3. optionally look at image ... if (i + 1) % 50 == 0: ... display_sample(sample, i + 1) ``` Sente-se e assista o gato ser gerado do nada além de ruído! 😻 <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/diffusion-quicktour.png"/> </div> ## Próximos passos Esperamos que você tenha gerado algumas imagens legais com o 🧨 Diffusers neste tour rápido! Para suas próximas etapas, você pode - Treine ou faça a configuração fina de um modelo para gerar suas próprias imagens no tutorial de [treinamento](./tutorials/basic_training). - Veja exemplos oficiais e da comunidade de [scripts de treinamento ou configuração fina](https://github.com/huggingface/diffusers/tree/main/examples#-diffusers-examples) para os mais variados casos de uso. - Aprenda sobre como carregar, acessar, mudar e comparar agendadores no guia [Usando diferentes agendadores](./using-diffusers/schedulers). - Explore engenharia de prompt, otimizações de velocidade e memória, e dicas e truques para gerar imagens de maior qualidade com o guia [Stable Diffusion](./stable_diffusion). - Se aprofunde em acelerar 🧨 Diffusers com guias sobre [PyTorch otimizado em uma GPU](./optimization/fp16), e guias de inferência para rodar [Stable Diffusion em Apple Silicon (M1/M2)](./optimization/mps) e [ONNX Runtime](./optimization/onnx).
diffusers/docs/source/pt/quicktour.md/0
{ "file_path": "diffusers/docs/source/pt/quicktour.md", "repo_id": "diffusers", "token_count": 6790 }
134
<!--版权 2025 The HuggingFace Team。保留所有权利。 根据Apache许可证2.0版("许可证")授权;除非符合许可证的规定,否则不得使用此文件。 您可以在以下网址获取许可证的副本 http://www.apache.org/licenses/LICENSE-2.0 除非适用法律要求或书面同意,根据许可证分发的软件是基于"按原样"分发的,没有任何形式的明示或暗示的担保或条件。有关许可证下特定的语言管理权限和限制,请参阅许可证。 --> # 状态 块依赖于[`~modular_pipelines.PipelineState`]和[`~modular_pipelines.BlockState`]数据结构进行通信和数据共享。 | 状态 | 描述 | |-------|-------------| | [`~modular_pipelines.PipelineState`] | 维护管道执行所需的整体数据,并允许块读取和更新其数据。 | | [`~modular_pipelines.BlockState`] | 允许每个块使用来自`inputs`的必要数据执行其计算 | 本指南解释了状态如何工作以及它们如何连接块。 ## PipelineState [`~modular_pipelines.PipelineState`]是所有块的全局状态容器。它维护管道的完整运行时状态,并为块提供了一种结构化的方式来读取和写入共享数据。 [`~modular_pipelines.PipelineState`]中有两个字典用于结构化数据。 - `values`字典是一个**可变**状态,包含用户提供的输入值的副本和由块生成的中间输出值。如果一个块修改了一个`input`,它将在调用`set_block_state`后反映在`values`字典中。 ```py PipelineState( values={ 'prompt': 'a cat' 'guidance_scale': 7.0 'num_inference_steps': 25 'prompt_embeds': Tensor(dtype=torch.float32, shape=torch.Size([1, 1, 1, 1])) 'negative_prompt_embeds': None }, ) ``` ## BlockState [`~modular_pipelines.BlockState`]是[`~modular_pipelines.PipelineState`]中相关变量的局部视图,单个块需要这些变量来执行其计算。 直接作为属性访问这些变量,如`block_state.image`。 ```py BlockState( image: <PIL.Image.Image image mode=RGB size=512x512 at 0x7F3ECC494640> ) ``` 当一个块的`__call__`方法被执行时,它用`self.get_block_state(state)`检索[`BlockState`],执行其操作,并用`self.set_block_state(state, block_state)`更新[`~modular_pipelines.PipelineState`]。 ```py def __call__(self, components, state): # 检索BlockState block_state = self.get_block_state(state) # 对输入进行计算的逻辑 # 更新PipelineState self.set_block_state(state, block_state) return components, state ``` ## 状态交互 [`~modular_pipelines.PipelineState`]和[`~modular_pipelines.BlockState`]的交互由块的`inputs`和`intermediate_outputs`定义。 - `inputs`, 一个块可以修改输入 - 比如 `block_state.image` - 并且这个改变可以通过调用 `set_block_state` 全局传播到 [`~modular_pipelines.PipelineState`]。 - `intermediate_outputs`,是一个块创建的新变量。它被添加到 [`~modular_pipelines.PipelineState`] 的 `values` 字典中,并且可以作为后续块的可用变量,或者由用户作为管道的最终输出访问。
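下面是一个示意性的草图,补充展示块在 `__call__` 中如何读取输入并写入新的中间输出。这里假设该块把 `image` 声明为输入、把 `image_latents` 声明为中间输出;`image`、`image_latents` 以及省略的具体计算仅用于说明,并非某个真实块的实现。

```py
def __call__(self, components, state):
    # 从全局 PipelineState 中取出该块所需变量的局部视图
    block_state = self.get_block_state(state)

    # 读取输入:直接作为属性访问
    image = block_state.image

    # ……在这里用 image 完成实际计算,得到 latents(仅为示意)……
    latents = ...

    # 写入新的中间输出:同样作为属性赋值
    block_state.image_latents = latents

    # 调用 set_block_state 后,对输入的修改和新增的 image_latents
    # 都会反映到 PipelineState 的 values 字典中,供后续块或用户使用
    self.set_block_state(state, block_state)
    return components, state
```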
diffusers/docs/source/zh/modular_diffusers/modular_diffusers_states.md/0
{ "file_path": "diffusers/docs/source/zh/modular_diffusers/modular_diffusers_states.md", "repo_id": "diffusers", "token_count": 1834 }
135
# ParaAttention <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/para-attn/flux-performance.png"> </div> <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/para-attn/hunyuan-video-performance.png"> </div> 大型图像和视频生成模型,如 [FLUX.1-dev](https://huggingface.co/black-forest-labs/FLUX.1-dev) 和 [HunyuanVideo](https://huggingface.co/tencent/HunyuanVideo),由于其规模,可能对实时应用和部署构成推理挑战。 [ParaAttention](https://github.com/chengzeyi/ParaAttention) 是一个实现了**上下文并行**和**第一块缓存**的库,可以与其他技术(如 torch.compile、fp8 动态量化)结合使用,以加速推理。 本指南将展示如何在 NVIDIA L20 GPU 上对 FLUX.1-dev 和 HunyuanVideo 应用 ParaAttention。 在我们的基线基准测试中,除了 HunyuanVideo 为避免内存不足错误外,未应用任何优化。 我们的基线基准测试显示,FLUX.1-dev 能够在 28 步中生成 1024x1024 分辨率图像,耗时 26.36 秒;HunyuanVideo 能够在 30 步中生成 129 帧 720p 分辨率视频,耗时 3675.71 秒。 > [!TIP] > 对于更快的上下文并行推理,请尝试使用支持 NVLink 的 NVIDIA A100 或 H100 GPU(如果可用),尤其是在 GPU 数量较多时。 ## 第一块缓存 缓存模型中 transformer 块的输出并在后续推理步骤中重用它们,可以降低计算成本并加速推理。 然而,很难决定何时重用缓存以确保生成图像或视频的质量。ParaAttention 直接使用**第一个 transformer 块输出的残差差异**来近似模型输出之间的差异。当差异足够小时,重用先前推理步骤的残差差异。换句话说,跳过去噪步骤。 这在 FLUX.1-dev 和 HunyuanVideo 推理上实现了 2 倍加速,且质量非常好。 <figure> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/para-attn/ada-cache.png" alt="Cache in Diffusion Transformer" /> <figcaption>AdaCache 的工作原理,第一块缓存是其变体</figcaption> </figure> <hfoptions id="first-block-cache"> <hfoption id="FLUX-1.dev"> 要在 FLUX.1-dev 上应用第一块缓存,请调用 `apply_cache_on_pipe`,如下所示。0.08 是 FLUX 模型的默认残差差异值。 ```python import time import torch from diffusers import FluxPipeline pipe = FluxPipeline.from_pretrained( "black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16, ).to("cuda") from para_attn.first_block_cache.diffusers_adapters import apply_cache_on_pipe apply_cache_on_pipe(pipe, residual_diff_thre shold=0.08) # 启用内存节省 # pipe.enable_model_cpu_offload() # pipe.enable_sequential_cpu_offload() begin = time.time() image = pipe( "A cat holding a sign that says hello world", num_inference_steps=28, ).images[0] end = time.time() print(f"Time: {end - begin:.2f}s") print("Saving image to flux.png") image.save("flux.png") ``` | 优化 | 原始 | FBCache rdt=0.06 | FBCache rdt=0.08 | FBCache rdt=0.10 | FBCache rdt=0.12 | | - | - | - | - | - | - | | 预览 | ![Original](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/para-attn/flux-original.png) | ![FBCache rdt=0.06](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/para-attn/flux-fbc-0.06.png) | ![FBCache rdt=0.08](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/para-attn/flux-fbc-0.08.png) | ![FBCache rdt=0.10](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/para-attn/flux-fbc-0.10.png) | ![FBCache rdt=0.12](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/para-attn/flux-fbc-0.12.png) | | 墙时间 (s) | 26.36 | 21.83 | 17.01 | 16.00 | 13.78 | First Block Cache 将推理速度降低到 17.01 秒,与基线相比,或快 1.55 倍,同时保持几乎零质量损失。 </hfoption> <hfoption id="HunyuanVideo"> 要在 HunyuanVideo 上应用 First Block Cache,请使用 `apply_cache_on_pipe`,如下所示。0.06 是 HunyuanVideo 模型的默认残差差值。 ```python import time import torch from diffusers import HunyuanVideoPipeline, HunyuanVideoTransformer3DModel from diffusers.utils import export_to_video model_id = "tencent/HunyuanVideo" transformer = HunyuanVideoTransformer3DModel.from_pretrained( model_id, 
subfolder="transformer", torch_dtype=torch.bfloat16, revision="refs/pr/18", ) pipe = HunyuanVideoPipeline.from_pretrained( model_id, transformer=transformer, torch_dtype=torch.float16, revision="refs/pr/18", ).to("cuda") from para_attn.first_block_cache.diffusers_adapters import apply_cache_on_pipe apply_cache_on_pipe(pipe, residual_diff_threshold=0.6) pipe.vae.enable_tiling() begin = time.time() output = pipe( prompt="A cat walks on the grass, realistic", height=720, width=1280, num_frames=129, num_inference_steps=30, ).frames[0] end = time.time() print(f"Time: {end - begin:.2f}s") print("Saving video to hunyuan_video.mp4") export_to_video(output, "hunyuan_video.mp4", fps=15) ``` <video controls> <source src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/para-attn/hunyuan-video-original.mp4" type="video/mp4"> 您的浏览器不支持视频标签。 </video> <small> HunyuanVideo 无 FBCache </small> <video controls> <source src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/para-attn/hunyuan-video-fbc.mp4" type="video/mp4"> Your browser does not support the video tag. </video> <small> HunyuanVideo 与 FBCache </small> First Block Cache 将推理速度降低至 2271.06 秒,相比基线快了 1.62 倍,同时保持了几乎为零的质量损失。 </hfoption> </hfoptions> ## fp8 量化 fp8 动态量化进一步加速推理并减少内存使用。为了使用 8 位 [NVIDIA Tensor Cores](https://www.nvidia.com/en-us/data-center/tensor-cores/),必须对激活和权重进行量化。 使用 `float8_weight_only` 和 `float8_dynamic_activation_float8_weight` 来量化文本编码器和变换器模型。 默认量化方法是逐张量量化,但如果您的 GPU 支持逐行量化,您也可以尝试它以获得更好的准确性。 使用以下命令安装 [torchao](https://github.com/pytorch/ao/tree/main)。 ```bash pip3 install -U torch torchao ``` [torch.compile](https://pytorch.org/tutorials/intermediate/torch_compile_tutorial.html) 使用 `mode="max-autotune-no-cudagraphs"` 或 `mode="max-autotune"` 选择最佳内核以获得性能。如果是第一次调用模型,编译可能会花费很长时间,但一旦模型编译完成,这是值得的。 此示例仅量化变换器模型,但您也可以量化文本编码器以进一步减少内存使用。 > [!TIP] > 动态量化可能会显著改变模型输出的分布,因此您需要将 `residual_diff_threshold` 设置为更大的值以使其生效。 <hfoptions id="fp8-quantization"> <hfoption id="FLUX-1.dev"> ```python import time import torch from diffusers import FluxPipeline pipe = FluxPipeline.from_pretrained( "black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16, ).to("cuda") from para_attn.first_block_cache.diffusers_adapters import apply_cache_on_pipe apply_cache_on_pipe( pipe, residual_diff_threshold=0.12, # 使用更大的值以使缓存生效 ) from torchao.quantization import quantize_, float8_dynamic_activation_float8_weight, float8_weight_only quantize_(pipe.text_encoder, float8_weight_only()) quantize_(pipe.transformer, float8_dynamic_activation_float8_weight()) pipe.transformer = torch.compile( pipe.transformer, mode="max-autotune-no-cudagraphs", ) # 启用内存节省 # pipe.enable_model_cpu_offload() # pipe.enable_sequential_cpu_offload() for i in range(2): begin = time.time() image = pipe( "A cat holding a sign that says hello world", num_inference_steps=28, ).images[0] end = time.time() if i == 0: print(f"预热时间: {end - begin:.2f}s") else: print(f"时间: {end - begin:.2f}s") print("保存图像到 flux.png") image.save("flux.png") ``` fp8 动态量化和 torch.compile 将推理速度降低至 7.56 秒,相比基线快了 3.48 倍。 </hfoption> <hfoption id="HunyuanVideo"> ```python import time import torch from diffusers import HunyuanVideoPipeline, HunyuanVideoTransformer3DModel from diffusers.utils import export_to_video model_id = "tencent/HunyuanVideo" transformer = HunyuanVideoTransformer3DModel.from_pretrained( model_id, subfolder="transformer", torch_dtype=torch.bfloat16, revision="refs/pr/18", ) pipe = HunyuanVideoPipeline.from_pretrained( model_id, 
transformer=transformer, torch_dtype=torch.float16, revision="refs/pr/18", ).to("cuda") from para_attn.first_block_cache.diffusers_adapters import apply_cache_on_pipe apply_cache_on_pipe(pipe) from torchao.quantization import quantize_, float8_dynamic_activation_float8_weight, float8_weight_only quantize_(pipe.text_encoder, float8_weight_only()) quantize_(pipe.transformer, float8_dynamic_activation_float8_weight()) pipe.transformer = torch.compile( pipe.transformer, mode="max-autotune-no-cudagraphs", ) # Enable memory savings pipe.vae.enable_tiling() # pipe.enable_model_cpu_offload() # pipe.enable_sequential_cpu_offload() for i in range(2): begin = time.time() output = pipe( prompt="A cat walks on the grass, realistic", height=720, width=1280, num_frames=129, num_inference_steps=1 if i == 0 else 30, ).frames[0] end = time.time() if i == 0: print(f"Warm up time: {end - begin:.2f}s") else: print(f"Time: {end - begin:.2f}s") print("Saving video to hunyuan_video.mp4") export_to_video(output, "hunyuan_video.mp4", fps=15) ``` NVIDIA L20 GPU 仅有 48GB 内存,在编译后且如果未调用 `enable_model_cpu_offload` 时,可能会遇到内存不足(OOM)错误,因为 HunyuanVideo 在高分辨率和大量帧数运行时具有非常大的激活张量。对于内存少于 80GB 的 GPU,可以尝试降低分辨率和帧数来避免 OOM 错误。 大型视频生成模型通常受注意力计算而非全连接层的瓶颈限制。这些模型不会从量化和 torch.compile 中显著受益。 </hfoption> </hfoptions> ## 上下文并行性 上下文并行性并行化推理并随多个 GPU 扩展。ParaAttention 组合设计允许您将上下文并行性与第一块缓存和动态量化结合使用。 > [!TIP] > 请参考 [ParaAttention](https://github.com/chengzeyi/ParaAttention/tree/main) 仓库获取详细说明和如何使用多个 GPU 扩展推理的示例。 如果推理过程需要持久化和可服务,建议使用 [torch.multiprocessing](https://pytorch.org/docs/stable/multiprocessing.html) 编写您自己的推理处理器。这可以消除启动进程以及加载和重新编译模型的开销。 <hfoptions id="context-parallelism"> <hfoption id="FLUX-1.dev"> 以下代码示例结合了第一块缓存、fp8动态量化、torch.compile和上下文并行,以实现最快的推理速度。 ```python import time import torch import torch.distributed as dist from diffusers import FluxPipeline dist.init_process_group() torch.cuda.set_device(dist.get_rank()) pipe = FluxPipeline.from_pretrained( "black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16, ).to("cuda") from para_attn.context_parallel import init_context_parallel_mesh from para_attn.context_parallel.diffusers_adapters import parallelize_pipe from para_attn.parallel_vae.diffusers_adapters import parallelize_vae mesh = init_context_parallel_mesh( pipe.device.type, max_ring_dim_size=2, ) parallelize_pipe( pipe, mesh=mesh, ) parallelize_vae(pipe.vae, mesh=mesh._flatten()) from para_attn.first_block_cache.diffusers_adapters import apply_cache_on_pipe apply_cache_on_pipe( pipe, residual_diff_threshold=0.12, # 使用较大的值以使缓存生效 ) from torchao.quantization import quantize_, float8_dynamic_activation_float8_weight, float8_weight_only quantize_(pipe.text_encoder, float8_weight_only()) quantize_(pipe.transformer, float8_dynamic_activation_float8_weight()) torch._inductor.config.reorder_for_compute_comm_overlap = True pipe.transformer = torch.compile( pipe.transformer, mode="max-autotune-no-cudagraphs", ) # 启用内存节省 # pipe.enable_model_cpu_offload(gpu_id=dist.get_rank()) # pipe.enable_sequential_cpu_offload(gpu_id=dist.get_rank()) for i in range(2): begin = time.time() image = pipe( "A cat holding a sign that says hello world", num_inference_steps=28, output_type="pil" if dist.get_rank() == 0 else "pt", ).images[0] end = time.time() if dist.get_rank() == 0: if i == 0: print(f"预热时间: {end - begin:.2f}s") else: print(f"时间: {end - begin:.2f}s") if dist.get_rank() == 0: print("将图像保存到flux.png") image.save("flux.png") dist.destroy_process_group() ``` 保存到`run_flux.py`并使用[torchrun](https://pytorch.org/docs/stable/elastic/run.html)启动。 ```bash # 
使用--nproc_per_node指定GPU数量 torchrun --nproc_per_node=2 run_flux.py ``` 推理速度降至8.20秒,相比基线快了3.21倍,使用2个NVIDIA L20 GPU。在4个L20上,推理速度为3.90秒,快了6.75倍。 </hfoption> <hfoption id="HunyuanVideo"> 以下代码示例结合了第一块缓存和上下文并行,以实现最快的推理速度。 ```python import time import torch import torch.distributed as dist from diffusers import HunyuanVideoPipeline, HunyuanVideoTransformer3DModel from diffusers.utils import export_to_video dist.init_process_group() torch.cuda.set_device(dist.get_rank()) model_id = "tencent/HunyuanVideo" transformer = HunyuanVideoTransformer3DModel.from_pretrained( model_id, subfolder="transformer", torch_dtype=torch.bfloat16, revision="refs/pr/18", ) pipe = HunyuanVideoPipeline.from_pretrained( model_id, transformer=transformer, torch_dtype=torch.float16, revision="refs/pr/18", ).to("cuda") from para_attn.context_parallel import init_context_parallel_mesh from para_attn.context_parallel.diffusers_adapters import parallelize_pipe from para_attn.parallel_vae.diffusers_adapters import parallelize_vae mesh = init_context_parallel_mesh( pipe.device.type, ) parallelize_pipe( pipe, mesh=mesh, ) parallelize_vae(pipe.vae, mesh=mesh._flatten()) from para_attn.first_block_cache.diffusers_adapters import apply_cache_on_pipe apply_cache_on_pipe(pipe) # from torchao.quantization import quantize_, float8_dynamic_activation_float8_weight, float8_weight_only # # torch._inductor.config.reorder_for_compute_comm_overlap = True # # quantize_(pipe.text_encoder, float8_weight_only()) # quantize_(pipe.transformer, float8_dynamic_activation_float8_weight()) # pipe.transformer = torch.compile( # pipe.transformer, mode="max-autotune-no-cudagraphs", # ) # 启用内存节省 pipe.vae.enable_tiling() # pipe.enable_model_cpu_offload(gpu_id=dist.get_rank()) # pipe.enable_sequential_cpu_offload(gpu_id=dist.get_rank()) for i in range(2): begin = time.time() output = pipe( prompt="A cat walks on the grass, realistic", height=720, width=1280, num_frames=129, num_inference_steps=1 if i == 0 else 30, output_type="pil" if dist.get_rank() == 0 else "pt", ).frames[0] end = time.time() if dist.get_rank() == 0: if i == 0: print(f"预热时间: {end - begin:.2f}s") else: print(f"时间: {end - begin:.2f}s") if dist.get_rank() == 0: print("保存视频到 hunyuan_video.mp4") export_to_video(output, "hunyuan_video.mp4", fps=15) dist.destroy_process_group() ``` 保存到 `run_hunyuan_video.py` 并使用 [torchrun](https://pytorch.org/docs/stable/elastic/run.html) 启动。 ```bash # 使用 --nproc_per_node 指定 GPU 数量 torchrun --nproc_per_node=8 run_hunyuan_video.py ``` 推理速度降低到 649.23 秒,相比基线快 5.66 倍,使用 8 个 NVIDIA L20 GPU。 </hfoption> </hfoptions> ## 基准测试 <hfoptions id="conclusion"> <hfoption id="FLUX-1.dev"> | GPU 类型 | GPU 数量 | 优化 | 墙钟时间 (s) | 加速比 | | - | - | - | - | - | | NVIDIA L20 | 1 | 基线 | 26.36 | 1.00x | | NVIDIA L20 | 1 | FBCache (rdt=0.08) | 17.01 | 1.55x | | NVIDIA L20 | 1 | FP8 DQ | 13.40 | 1.96x | | NVIDIA L20 | 1 | FBCache (rdt=0.12) + FP8 DQ | 7.56 | 3.48x | | NVIDIA L20 | 2 | FBCache (rdt=0.12) + FP8 DQ + CP | 4.92 | 5.35x | | NVIDIA L20 | 4 | FBCache (rdt=0.12) + FP8 DQ + CP | 3.90 | 6.75x | </hfoption> <hfoption id="HunyuanVideo"> | GPU 类型 | GPU 数量 | 优化 | 墙钟时间 (s) | 加速比 | | - | - | - | - | - | | NVIDIA L20 | 1 | 基线 | 3675.71 | 1.00x | | NVIDIA L20 | 1 | FBCache | 2271.06 | 1.62x | | NVIDIA L20 | 2 | FBCache + CP | 1132.90 | 3.24x | | NVIDIA L20 | 4 | FBCache + CP | 718.15 | 5.12x | | NVIDIA L20 | 8 | FBCache + CP | 649.23 | 5.66x | </hfoption> </hfoptions>
diffusers/docs/source/zh/optimization/para_attn.md/0
{ "file_path": "diffusers/docs/source/zh/optimization/para_attn.md", "repo_id": "diffusers", "token_count": 8534 }
136
<!--Copyright 2025 The HuggingFace Team. All rights reserved. 根据 Apache License 2.0 版本("许可证")授权,除非符合许可证要求,否则不得使用此文件。您可以通过以下网址获取许可证副本: http://www.apache.org/licenses/LICENSE-2.0 除非适用法律要求或书面同意,本软件按"原样"分发,不附带任何明示或暗示的担保或条件。详见许可证中规定的特定语言权限和限制。 --> # 概述 🤗 Diffusers 提供了一系列训练脚本供您训练自己的diffusion模型。您可以在 [diffusers/examples](https://github.com/huggingface/diffusers/tree/main/examples) 找到所有训练脚本。 每个训练脚本具有以下特点: - **独立完整**:训练脚本不依赖任何本地文件,所有运行所需的包都通过 `requirements.txt` 文件安装 - **易于调整**:这些脚本是针对特定任务的训练示例,并不能开箱即用地适用于所有训练场景。您可能需要根据具体用例调整脚本。为此,我们完全公开了数据预处理代码和训练循环,方便您进行修改 - **新手友好**:脚本设计注重易懂性和入门友好性,而非包含最新最优方法以获得最具竞争力的结果。我们有意省略了过于复杂的训练方法 - **单一用途**:每个脚本仅针对一个任务设计,确保代码可读性和可理解性 当前提供的训练脚本包括: | 训练类型 | 支持SDXL | 支持LoRA | 支持Flax | |---|---|---|---| | [unconditional image generation](https://github.com/huggingface/diffusers/tree/main/examples/unconditional_image_generation) [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/training_example.ipynb) | | | | | [text-to-image](https://github.com/huggingface/diffusers/tree/main/examples/text_to_image) | 👍 | 👍 | 👍 | | [textual inversion](https://github.com/huggingface/diffusers/tree/main/examples/textual_inversion) [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_textual_inversion_training.ipynb) | | | 👍 | | [DreamBooth](https://github.com/huggingface/diffusers/tree/main/examples/dreambooth) [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_dreambooth_training.ipynb) | 👍 | 👍 | 👍 | | [ControlNet](https://github.com/huggingface/diffusers/tree/main/examples/controlnet) | 👍 | | 👍 | | [InstructPix2Pix](https://github.com/huggingface/diffusers/tree/main/examples/instruct_pix2pix) | 👍 | | | | [Custom Diffusion](https://github.com/huggingface/diffusers/tree/main/examples/custom_diffusion) | | | | | [T2I-Adapters](https://github.com/huggingface/diffusers/tree/main/examples/t2i_adapter) | 👍 | | | | [Kandinsky 2.2](https://github.com/huggingface/diffusers/tree/main/examples/kandinsky2_2/text_to_image) | | 👍 | | | [Wuerstchen](https://github.com/huggingface/diffusers/tree/main/examples/wuerstchen/text_to_image) | | 👍 | | 这些示例处于**积极维护**状态,如果遇到问题请随时提交issue。如果您认为应该添加其他训练示例,欢迎创建[功能请求](https://github.com/huggingface/diffusers/issues/new?assignees=&labels=&template=feature_request.md&title=)与我们讨论,我们将评估其是否符合独立完整、易于调整、新手友好和单一用途的标准。 ## 安装 请按照以下步骤在新虚拟环境中从源码安装库,确保能成功运行最新版本的示例脚本: ```bash git clone https://github.com/huggingface/diffusers cd diffusers pip install . ``` 然后进入具体训练脚本目录(例如[DreamBooth](https://github.com/huggingface/diffusers/tree/main/examples/dreambooth)),安装对应的`requirements.txt`文件。部分脚本针对SDXL、LoRA或Flax有特定要求文件,使用时请确保安装对应文件。 ```bash cd examples/dreambooth pip install -r requirements.txt # 如需用DreamBooth训练SDXL pip install -r requirements_sdxl.txt ``` 为加速训练并降低内存消耗,我们建议: - 使用PyTorch 2.0或更高版本,自动启用[缩放点积注意力](../optimization/fp16#scaled-dot-product-attention)(无需修改训练代码) - 安装[xFormers](../optimization/xformers)以启用内存高效注意力机制
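以文生图训练脚本为例,下面给出一个示意性的最小用法:先安装 xFormers,再在启动命令中加入 `--enable_xformers_memory_efficient_attention` 标志。其中的模型与数据集仅为示例,且并非所有脚本都支持该标志,具体请以各脚本的 `--help` 输出为准。

```bash
# 安装 xFormers(需与已安装的 PyTorch 版本兼容)
pip install xformers

# 示意性的最小命令:模型与数据集仅为示例
accelerate launch train_text_to_image.py \
  --pretrained_model_name_or_path="stable-diffusion-v1-5/stable-diffusion-v1-5" \
  --dataset_name="lambdalabs/naruto-blip-captions" \
  --resolution=512 \
  --train_batch_size=1 \
  --mixed_precision="fp16" \
  --enable_xformers_memory_efficient_attention
```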
diffusers/docs/source/zh/training/overview.md/0
{ "file_path": "diffusers/docs/source/zh/training/overview.md", "repo_id": "diffusers", "token_count": 2461 }
137
# coding=utf-8 # Copyright 2025 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import copy import logging import math import os import shutil from contextlib import nullcontext from pathlib import Path import torch import torch.nn.functional as F from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import ProjectConfiguration, set_seed from datasets import load_dataset from peft import LoraConfig from peft.utils import get_peft_model_state_dict from PIL import Image from PIL.ImageOps import exif_transpose from torch.utils.data import DataLoader, Dataset, default_collate from torchvision import transforms from transformers import ( CLIPTextModelWithProjection, CLIPTokenizer, ) import diffusers.optimization from diffusers import AmusedPipeline, AmusedScheduler, EMAModel, UVit2DModel, VQModel from diffusers.loaders import AmusedLoraLoaderMixin from diffusers.utils import is_wandb_available if is_wandb_available(): import wandb logger = get_logger(__name__, log_level="INFO") def parse_args(): parser = argparse.ArgumentParser() parser.add_argument( "--pretrained_model_name_or_path", type=str, default=None, required=True, help="Path to pretrained model or model identifier from huggingface.co/models.", ) parser.add_argument( "--revision", type=str, default=None, required=False, help="Revision of pretrained model identifier from huggingface.co/models.", ) parser.add_argument( "--variant", type=str, default=None, help="Variant of the model files of the pretrained model identifier from huggingface.co/models, 'e.g.' fp16", ) parser.add_argument( "--instance_data_dataset", type=str, default=None, required=False, help="A Hugging Face dataset containing the training images", ) parser.add_argument( "--instance_data_dir", type=str, default=None, required=False, help="A folder containing the training data of instance images.", ) parser.add_argument( "--instance_data_image", type=str, default=None, required=False, help="A single training image" ) parser.add_argument( "--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes." ) parser.add_argument( "--dataloader_num_workers", type=int, default=0, help=( "Number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process." ), ) parser.add_argument( "--allow_tf32", action="store_true", help=( "Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. 
For more information, see" " https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices" ), ) parser.add_argument("--use_ema", action="store_true", help="Whether to use EMA model.") parser.add_argument("--ema_decay", type=float, default=0.9999) parser.add_argument("--ema_update_after_step", type=int, default=0) parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.") parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.") parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.") parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer") parser.add_argument( "--output_dir", type=str, default="muse_training", help="The output directory where the model predictions and checkpoints will be written.", ) parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") parser.add_argument( "--logging_dir", type=str, default="logs", help=( "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to" " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***." ), ) parser.add_argument( "--max_train_steps", type=int, default=None, help="Total number of training steps to perform. If provided, overrides num_train_epochs.", ) parser.add_argument( "--checkpointing_steps", type=int, default=500, help=( "Save a checkpoint of the training state every X updates. Checkpoints can be used for resuming training via `--resume_from_checkpoint`. " "In the case that the checkpoint is better than the final trained model, the checkpoint can also be used for inference." "Using a checkpoint for inference requires separate loading of the original pipeline and the individual checkpointed model components." "See https://huggingface.co/docs/diffusers/main/en/training/dreambooth#performing-inference-using-a-saved-checkpoint for step by step" "instructions." ), ) parser.add_argument( "--logging_steps", type=int, default=50, ) parser.add_argument( "--checkpoints_total_limit", type=int, default=None, help=( "Max number of checkpoints to store. Passed as `total_limit` to the `Accelerator` `ProjectConfiguration`." " See Accelerator::save_state https://huggingface.co/docs/accelerate/package_reference/accelerator#accelerate.Accelerator.save_state" " for more details" ), ) parser.add_argument( "--resume_from_checkpoint", type=str, default=None, help=( "Whether training should be resumed from a previous checkpoint. Use a path saved by" ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.' ), ) parser.add_argument( "--train_batch_size", type=int, default=16, help="Batch size (per device) for the training dataloader." ) parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument( "--learning_rate", type=float, default=0.0003, help="Initial learning rate (after the potential warmup period) to use.", ) parser.add_argument( "--scale_lr", action="store_true", default=False, help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.", ) parser.add_argument( "--lr_scheduler", type=str, default="constant", help=( 'The scheduler type to use. 
Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",' ' "constant", "constant_with_warmup"]' ), ) parser.add_argument( "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler." ) parser.add_argument( "--validation_steps", type=int, default=100, help=( "Run validation every X steps. Validation consists of running the prompt" " `args.validation_prompt` multiple times: `args.num_validation_images`" " and logging the images." ), ) parser.add_argument( "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16"], help=( "Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >=" " 1.10.and an Nvidia Ampere GPU. Default to the value of accelerate config of the current system or the" " flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config." ), ) parser.add_argument( "--report_to", type=str, default="wandb", help=( 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`' ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.' ), ) parser.add_argument("--validation_prompts", type=str, nargs="*") parser.add_argument( "--resolution", type=int, default=512, help=( "The resolution for input images, all the images in the train/validation dataset will be resized to this" " resolution" ), ) parser.add_argument("--split_vae_encode", type=int, required=False, default=None) parser.add_argument("--min_masking_rate", type=float, default=0.0) parser.add_argument("--cond_dropout_prob", type=float, default=0.0) parser.add_argument("--max_grad_norm", default=None, type=float, help="Max gradient norm.", required=False) parser.add_argument("--use_lora", action="store_true", help="Fine tune the model using LoRa") parser.add_argument("--text_encoder_use_lora", action="store_true", help="Fine tune the model using LoRa") parser.add_argument("--lora_r", default=16, type=int) parser.add_argument("--lora_alpha", default=32, type=int) parser.add_argument("--lora_target_modules", default=["to_q", "to_k", "to_v"], type=str, nargs="+") parser.add_argument("--text_encoder_lora_r", default=16, type=int) parser.add_argument("--text_encoder_lora_alpha", default=32, type=int) parser.add_argument("--text_encoder_lora_target_modules", default=["to_q", "to_k", "to_v"], type=str, nargs="+") parser.add_argument("--train_text_encoder", action="store_true") parser.add_argument("--image_key", type=str, required=False) parser.add_argument("--prompt_key", type=str, required=False) parser.add_argument( "--gradient_checkpointing", action="store_true", help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.", ) parser.add_argument("--prompt_prefix", type=str, required=False, default=None) args = parser.parse_args() if args.report_to == "wandb": if not is_wandb_available(): raise ImportError("Make sure to install wandb if you want to use it for logging during training.") num_datasources = sum( [x is not None for x in [args.instance_data_dir, args.instance_data_image, args.instance_data_dataset]] ) if num_datasources != 1: raise ValueError( "provide one and only one of `--instance_data_dir`, `--instance_data_image`, or `--instance_data_dataset`" ) if args.instance_data_dir is not None: if not os.path.exists(args.instance_data_dir): raise ValueError(f"Does not exist: `--args.instance_data_dir` {args.instance_data_dir}") if args.instance_data_image is not None: if not 
os.path.exists(args.instance_data_image): raise ValueError(f"Does not exist: `--args.instance_data_image` {args.instance_data_image}") if args.instance_data_dataset is not None and (args.image_key is None or args.prompt_key is None): raise ValueError("`--instance_data_dataset` requires setting `--image_key` and `--prompt_key`") return args class InstanceDataRootDataset(Dataset): def __init__( self, instance_data_root, tokenizer, size=512, ): self.size = size self.tokenizer = tokenizer self.instance_images_path = list(Path(instance_data_root).iterdir()) def __len__(self): return len(self.instance_images_path) def __getitem__(self, index): image_path = self.instance_images_path[index % len(self.instance_images_path)] instance_image = Image.open(image_path) rv = process_image(instance_image, self.size) prompt = os.path.splitext(os.path.basename(image_path))[0] rv["prompt_input_ids"] = tokenize_prompt(self.tokenizer, prompt)[0] return rv class InstanceDataImageDataset(Dataset): def __init__( self, instance_data_image, train_batch_size, size=512, ): self.value = process_image(Image.open(instance_data_image), size) self.train_batch_size = train_batch_size def __len__(self): # Needed so a full batch of the data can be returned. Otherwise will return # batches of size 1 return self.train_batch_size def __getitem__(self, index): return self.value class HuggingFaceDataset(Dataset): def __init__( self, hf_dataset, tokenizer, image_key, prompt_key, prompt_prefix=None, size=512, ): self.size = size self.image_key = image_key self.prompt_key = prompt_key self.tokenizer = tokenizer self.hf_dataset = hf_dataset self.prompt_prefix = prompt_prefix def __len__(self): return len(self.hf_dataset) def __getitem__(self, index): item = self.hf_dataset[index] rv = process_image(item[self.image_key], self.size) prompt = item[self.prompt_key] if self.prompt_prefix is not None: prompt = self.prompt_prefix + prompt rv["prompt_input_ids"] = tokenize_prompt(self.tokenizer, prompt)[0] return rv def process_image(image, size): image = exif_transpose(image) if not image.mode == "RGB": image = image.convert("RGB") orig_height = image.height orig_width = image.width image = transforms.Resize(size, interpolation=transforms.InterpolationMode.BILINEAR)(image) c_top, c_left, _, _ = transforms.RandomCrop.get_params(image, output_size=(size, size)) image = transforms.functional.crop(image, c_top, c_left, size, size) image = transforms.ToTensor()(image) micro_conds = torch.tensor( [orig_width, orig_height, c_top, c_left, 6.0], ) return {"image": image, "micro_conds": micro_conds} def tokenize_prompt(tokenizer, prompt): return tokenizer( prompt, truncation=True, padding="max_length", max_length=77, return_tensors="pt", ).input_ids def encode_prompt(text_encoder, input_ids): outputs = text_encoder(input_ids, return_dict=True, output_hidden_states=True) encoder_hidden_states = outputs.hidden_states[-2] cond_embeds = outputs[0] return encoder_hidden_states, cond_embeds def main(args): if args.allow_tf32: torch.backends.cuda.matmul.allow_tf32 = True logging_dir = Path(args.output_dir, args.logging_dir) accelerator_project_config = ProjectConfiguration(project_dir=args.output_dir, logging_dir=logging_dir) accelerator = Accelerator( gradient_accumulation_steps=args.gradient_accumulation_steps, mixed_precision=args.mixed_precision, log_with=args.report_to, project_config=accelerator_project_config, ) # Disable AMP for MPS. 
if torch.backends.mps.is_available(): accelerator.native_amp = False if accelerator.is_main_process: os.makedirs(args.output_dir, exist_ok=True) # Make one log on every process with the configuration for debugging. logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) logger.info(accelerator.state, main_process_only=False) if accelerator.is_main_process: accelerator.init_trackers("amused", config=vars(copy.deepcopy(args))) if args.seed is not None: set_seed(args.seed) # TODO - will have to fix loading if training text encoder text_encoder = CLIPTextModelWithProjection.from_pretrained( args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision, variant=args.variant ) tokenizer = CLIPTokenizer.from_pretrained( args.pretrained_model_name_or_path, subfolder="tokenizer", revision=args.revision, variant=args.variant ) vq_model = VQModel.from_pretrained( args.pretrained_model_name_or_path, subfolder="vqvae", revision=args.revision, variant=args.variant ) if args.train_text_encoder: if args.text_encoder_use_lora: lora_config = LoraConfig( r=args.text_encoder_lora_r, lora_alpha=args.text_encoder_lora_alpha, target_modules=args.text_encoder_lora_target_modules, ) text_encoder.add_adapter(lora_config) text_encoder.train() text_encoder.requires_grad_(True) else: text_encoder.eval() text_encoder.requires_grad_(False) vq_model.requires_grad_(False) model = UVit2DModel.from_pretrained( args.pretrained_model_name_or_path, subfolder="transformer", revision=args.revision, variant=args.variant, ) if args.use_lora: lora_config = LoraConfig( r=args.lora_r, lora_alpha=args.lora_alpha, target_modules=args.lora_target_modules, ) model.add_adapter(lora_config) model.train() if args.gradient_checkpointing: model.enable_gradient_checkpointing() if args.train_text_encoder: text_encoder.gradient_checkpointing_enable() if args.use_ema: ema = EMAModel( model.parameters(), decay=args.ema_decay, update_after_step=args.ema_update_after_step, model_cls=UVit2DModel, model_config=model.config, ) def save_model_hook(models, weights, output_dir): if accelerator.is_main_process: transformer_lora_layers_to_save = None text_encoder_lora_layers_to_save = None for model_ in models: if isinstance(model_, type(accelerator.unwrap_model(model))): if args.use_lora: transformer_lora_layers_to_save = get_peft_model_state_dict(model_) else: model_.save_pretrained(os.path.join(output_dir, "transformer")) elif isinstance(model_, type(accelerator.unwrap_model(text_encoder))): if args.text_encoder_use_lora: text_encoder_lora_layers_to_save = get_peft_model_state_dict(model_) else: model_.save_pretrained(os.path.join(output_dir, "text_encoder")) else: raise ValueError(f"unexpected save model: {model_.__class__}") # make sure to pop weight so that corresponding model is not saved again weights.pop() if transformer_lora_layers_to_save is not None or text_encoder_lora_layers_to_save is not None: AmusedLoraLoaderMixin.save_lora_weights( output_dir, transformer_lora_layers=transformer_lora_layers_to_save, text_encoder_lora_layers=text_encoder_lora_layers_to_save, ) if args.use_ema: ema.save_pretrained(os.path.join(output_dir, "ema_model")) def load_model_hook(models, input_dir): transformer = None text_encoder_ = None while len(models) > 0: model_ = models.pop() if isinstance(model_, type(accelerator.unwrap_model(model))): if args.use_lora: transformer = model_ else: load_model = UVit2DModel.from_pretrained(os.path.join(input_dir, 
"transformer")) model_.load_state_dict(load_model.state_dict()) del load_model elif isinstance(model, type(accelerator.unwrap_model(text_encoder))): if args.text_encoder_use_lora: text_encoder_ = model_ else: load_model = CLIPTextModelWithProjection.from_pretrained(os.path.join(input_dir, "text_encoder")) model_.load_state_dict(load_model.state_dict()) del load_model else: raise ValueError(f"unexpected save model: {model.__class__}") if transformer is not None or text_encoder_ is not None: lora_state_dict, network_alphas = AmusedLoraLoaderMixin.lora_state_dict(input_dir) AmusedLoraLoaderMixin.load_lora_into_text_encoder( lora_state_dict, network_alphas=network_alphas, text_encoder=text_encoder_ ) AmusedLoraLoaderMixin.load_lora_into_transformer( lora_state_dict, network_alphas=network_alphas, transformer=transformer ) if args.use_ema: load_from = EMAModel.from_pretrained(os.path.join(input_dir, "ema_model"), model_cls=UVit2DModel) ema.load_state_dict(load_from.state_dict()) del load_from accelerator.register_load_state_pre_hook(load_model_hook) accelerator.register_save_state_pre_hook(save_model_hook) if args.scale_lr: args.learning_rate = ( args.learning_rate * args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps ) if args.use_8bit_adam: try: import bitsandbytes as bnb except ImportError: raise ImportError( "Please install bitsandbytes to use 8-bit Adam. You can do so by running `pip install bitsandbytes`" ) optimizer_cls = bnb.optim.AdamW8bit else: optimizer_cls = torch.optim.AdamW # no decay on bias and layernorm and embedding no_decay = ["bias", "layer_norm.weight", "mlm_ln.weight", "embeddings.weight"] optimizer_grouped_parameters = [ { "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], "weight_decay": args.adam_weight_decay, }, { "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0, }, ] if args.train_text_encoder: optimizer_grouped_parameters.append( {"params": text_encoder.parameters(), "weight_decay": args.adam_weight_decay} ) optimizer = optimizer_cls( optimizer_grouped_parameters, lr=args.learning_rate, betas=(args.adam_beta1, args.adam_beta2), weight_decay=args.adam_weight_decay, eps=args.adam_epsilon, ) logger.info("Creating dataloaders and lr_scheduler") total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps if args.instance_data_dir is not None: dataset = InstanceDataRootDataset( instance_data_root=args.instance_data_dir, tokenizer=tokenizer, size=args.resolution, ) elif args.instance_data_image is not None: dataset = InstanceDataImageDataset( instance_data_image=args.instance_data_image, train_batch_size=args.train_batch_size, size=args.resolution, ) elif args.instance_data_dataset is not None: dataset = HuggingFaceDataset( hf_dataset=load_dataset(args.instance_data_dataset, split="train"), tokenizer=tokenizer, image_key=args.image_key, prompt_key=args.prompt_key, prompt_prefix=args.prompt_prefix, size=args.resolution, ) else: assert False train_dataloader = DataLoader( dataset, batch_size=args.train_batch_size, shuffle=True, num_workers=args.dataloader_num_workers, collate_fn=default_collate, ) train_dataloader.num_batches = len(train_dataloader) lr_scheduler = diffusers.optimization.get_scheduler( args.lr_scheduler, optimizer=optimizer, num_training_steps=args.max_train_steps * accelerator.num_processes, num_warmup_steps=args.lr_warmup_steps * accelerator.num_processes, ) logger.info("Preparing 
model, optimizer and dataloaders") if args.train_text_encoder: model, optimizer, lr_scheduler, train_dataloader, text_encoder = accelerator.prepare( model, optimizer, lr_scheduler, train_dataloader, text_encoder ) else: model, optimizer, lr_scheduler, train_dataloader = accelerator.prepare( model, optimizer, lr_scheduler, train_dataloader ) train_dataloader.num_batches = len(train_dataloader) weight_dtype = torch.float32 if accelerator.mixed_precision == "fp16": weight_dtype = torch.float16 elif accelerator.mixed_precision == "bf16": weight_dtype = torch.bfloat16 if not args.train_text_encoder: text_encoder.to(device=accelerator.device, dtype=weight_dtype) vq_model.to(device=accelerator.device) if args.use_ema: ema.to(accelerator.device) with nullcontext() if args.train_text_encoder else torch.no_grad(): empty_embeds, empty_clip_embeds = encode_prompt( text_encoder, tokenize_prompt(tokenizer, "").to(text_encoder.device, non_blocking=True) ) # There is a single image, we can just pre-encode the single prompt if args.instance_data_image is not None: prompt = os.path.splitext(os.path.basename(args.instance_data_image))[0] encoder_hidden_states, cond_embeds = encode_prompt( text_encoder, tokenize_prompt(tokenizer, prompt).to(text_encoder.device, non_blocking=True) ) encoder_hidden_states = encoder_hidden_states.repeat(args.train_batch_size, 1, 1) cond_embeds = cond_embeds.repeat(args.train_batch_size, 1) # We need to recalculate our total training steps as the size of the training dataloader may have changed. num_update_steps_per_epoch = math.ceil(train_dataloader.num_batches / args.gradient_accumulation_steps) # Afterwards we recalculate our number of training epochs. # Note: We are not doing epoch based training here, but just using this for book keeping and being able to # reuse the same training loop with other datasets/loaders. num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) # Train! logger.info("***** Running training *****") logger.info(f" Num training steps = {args.max_train_steps}") logger.info(f" Instantaneous batch size per device = {args.train_batch_size}") logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}") logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") resume_from_checkpoint = args.resume_from_checkpoint if resume_from_checkpoint: if resume_from_checkpoint == "latest": # Get the most recent checkpoint dirs = os.listdir(args.output_dir) dirs = [d for d in dirs if d.startswith("checkpoint")] dirs = sorted(dirs, key=lambda x: int(x.split("-")[1])) if len(dirs) > 0: resume_from_checkpoint = os.path.join(args.output_dir, dirs[-1]) else: resume_from_checkpoint = None if resume_from_checkpoint is None: accelerator.print( f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run." ) else: accelerator.print(f"Resuming from checkpoint {resume_from_checkpoint}") if resume_from_checkpoint is None: global_step = 0 first_epoch = 0 else: accelerator.load_state(resume_from_checkpoint) global_step = int(os.path.basename(resume_from_checkpoint).split("-")[1]) first_epoch = global_step // num_update_steps_per_epoch # As stated above, we are not doing epoch based training here, but just using this for book keeping and being able to # reuse the same training loop with other datasets/loaders. 
for epoch in range(first_epoch, num_train_epochs): for batch in train_dataloader: with torch.no_grad(): micro_conds = batch["micro_conds"].to(accelerator.device, non_blocking=True) pixel_values = batch["image"].to(accelerator.device, non_blocking=True) batch_size = pixel_values.shape[0] split_batch_size = args.split_vae_encode if args.split_vae_encode is not None else batch_size num_splits = math.ceil(batch_size / split_batch_size) image_tokens = [] for i in range(num_splits): start_idx = i * split_batch_size end_idx = min((i + 1) * split_batch_size, batch_size) bs = pixel_values.shape[0] image_tokens.append( vq_model.quantize(vq_model.encode(pixel_values[start_idx:end_idx]).latents)[2][2].reshape( bs, -1 ) ) image_tokens = torch.cat(image_tokens, dim=0) batch_size, seq_len = image_tokens.shape timesteps = torch.rand(batch_size, device=image_tokens.device) mask_prob = torch.cos(timesteps * math.pi * 0.5) mask_prob = mask_prob.clip(args.min_masking_rate) num_token_masked = (seq_len * mask_prob).round().clamp(min=1) batch_randperm = torch.rand(batch_size, seq_len, device=image_tokens.device).argsort(dim=-1) mask = batch_randperm < num_token_masked.unsqueeze(-1) mask_id = accelerator.unwrap_model(model).config.vocab_size - 1 input_ids = torch.where(mask, mask_id, image_tokens) labels = torch.where(mask, image_tokens, -100) if args.cond_dropout_prob > 0.0: assert encoder_hidden_states is not None batch_size = encoder_hidden_states.shape[0] mask = ( torch.zeros((batch_size, 1, 1), device=encoder_hidden_states.device).float().uniform_(0, 1) < args.cond_dropout_prob ) empty_embeds_ = empty_embeds.expand(batch_size, -1, -1) encoder_hidden_states = torch.where( (encoder_hidden_states * mask).bool(), encoder_hidden_states, empty_embeds_ ) empty_clip_embeds_ = empty_clip_embeds.expand(batch_size, -1) cond_embeds = torch.where((cond_embeds * mask.squeeze(-1)).bool(), cond_embeds, empty_clip_embeds_) bs = input_ids.shape[0] vae_scale_factor = 2 ** (len(vq_model.config.block_out_channels) - 1) resolution = args.resolution // vae_scale_factor input_ids = input_ids.reshape(bs, resolution, resolution) if "prompt_input_ids" in batch: with nullcontext() if args.train_text_encoder else torch.no_grad(): encoder_hidden_states, cond_embeds = encode_prompt( text_encoder, batch["prompt_input_ids"].to(accelerator.device, non_blocking=True) ) # Train Step with accelerator.accumulate(model): codebook_size = accelerator.unwrap_model(model).config.codebook_size logits = ( model( input_ids=input_ids, encoder_hidden_states=encoder_hidden_states, micro_conds=micro_conds, pooled_text_emb=cond_embeds, ) .reshape(bs, codebook_size, -1) .permute(0, 2, 1) .reshape(-1, codebook_size) ) loss = F.cross_entropy( logits, labels.view(-1), ignore_index=-100, reduction="mean", ) # Gather the losses across all processes for logging (if we use distributed training). 
avg_loss = accelerator.gather(loss.repeat(args.train_batch_size)).mean() avg_masking_rate = accelerator.gather(mask_prob.repeat(args.train_batch_size)).mean() accelerator.backward(loss) if args.max_grad_norm is not None and accelerator.sync_gradients: accelerator.clip_grad_norm_(model.parameters(), args.max_grad_norm) optimizer.step() lr_scheduler.step() optimizer.zero_grad(set_to_none=True) # Checks if the accelerator has performed an optimization step behind the scenes if accelerator.sync_gradients: if args.use_ema: ema.step(model.parameters()) if (global_step + 1) % args.logging_steps == 0: logs = { "step_loss": avg_loss.item(), "lr": lr_scheduler.get_last_lr()[0], "avg_masking_rate": avg_masking_rate.item(), } accelerator.log(logs, step=global_step + 1) logger.info( f"Step: {global_step + 1} " f"Loss: {avg_loss.item():0.4f} " f"LR: {lr_scheduler.get_last_lr()[0]:0.6f}" ) if (global_step + 1) % args.checkpointing_steps == 0: save_checkpoint(args, accelerator, global_step + 1) if (global_step + 1) % args.validation_steps == 0 and accelerator.is_main_process: if args.use_ema: ema.store(model.parameters()) ema.copy_to(model.parameters()) with torch.no_grad(): logger.info("Generating images...") model.eval() if args.train_text_encoder: text_encoder.eval() scheduler = AmusedScheduler.from_pretrained( args.pretrained_model_name_or_path, subfolder="scheduler", revision=args.revision, variant=args.variant, ) pipe = AmusedPipeline( transformer=accelerator.unwrap_model(model), tokenizer=tokenizer, text_encoder=text_encoder, vqvae=vq_model, scheduler=scheduler, ) pil_images = pipe(prompt=args.validation_prompts).images wandb_images = [ wandb.Image(image, caption=args.validation_prompts[i]) for i, image in enumerate(pil_images) ] wandb.log({"generated_images": wandb_images}, step=global_step + 1) model.train() if args.train_text_encoder: text_encoder.train() if args.use_ema: ema.restore(model.parameters()) global_step += 1 # Stop training if max steps is reached if global_step >= args.max_train_steps: break # End for accelerator.wait_for_everyone() # Evaluate and save checkpoint at the end of training save_checkpoint(args, accelerator, global_step) # Save the final trained checkpoint if accelerator.is_main_process: model = accelerator.unwrap_model(model) if args.use_ema: ema.copy_to(model.parameters()) model.save_pretrained(args.output_dir) accelerator.end_training() def save_checkpoint(args, accelerator, global_step): output_dir = args.output_dir # _before_ saving state, check if this save would set us over the `checkpoints_total_limit` if accelerator.is_main_process and args.checkpoints_total_limit is not None: checkpoints = os.listdir(output_dir) checkpoints = [d for d in checkpoints if d.startswith("checkpoint")] checkpoints = sorted(checkpoints, key=lambda x: int(x.split("-")[1])) # before we save the new checkpoint, we need to have at _most_ `checkpoints_total_limit - 1` checkpoints if len(checkpoints) >= args.checkpoints_total_limit: num_to_remove = len(checkpoints) - args.checkpoints_total_limit + 1 removing_checkpoints = checkpoints[0:num_to_remove] logger.info( f"{len(checkpoints)} checkpoints already exist, removing {len(removing_checkpoints)} checkpoints" ) logger.info(f"removing checkpoints: {', '.join(removing_checkpoints)}") for removing_checkpoint in removing_checkpoints: removing_checkpoint = os.path.join(output_dir, removing_checkpoint) shutil.rmtree(removing_checkpoint) save_path = Path(output_dir) / f"checkpoint-{global_step}" accelerator.save_state(save_path) 
logger.info(f"Saved state to {save_path}") if __name__ == "__main__": main(parse_args())
diffusers/examples/amused/train_amused.py/0
{ "file_path": "diffusers/examples/amused/train_amused.py", "repo_id": "diffusers", "token_count": 17513 }
138
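A minimal standalone sketch of the token-masking step used in the training loop of train_amused.py above, for readers who want the core idea in isolation. The tensor sizes, codebook size, and mask id below are illustrative placeholders rather than values taken from the script; only the cosine schedule and the masking/label logic mirror the code.

import math

import torch

# MUSE-style masking: sample a timestep per example, map it to a masking rate
# with cos(t * pi / 2), then replace that fraction of image tokens with a mask id.
batch_size, seq_len = 4, 256      # illustrative sizes (hypothetical)
codebook_size = 8192              # hypothetical codebook size
mask_id = codebook_size           # the script uses vocab_size - 1 as the mask token
min_masking_rate = 0.0

image_tokens = torch.randint(0, codebook_size, (batch_size, seq_len))

timesteps = torch.rand(batch_size)
mask_prob = torch.cos(timesteps * math.pi * 0.5).clip(min_masking_rate)

num_token_masked = (seq_len * mask_prob).round().clamp(min=1)
batch_randperm = torch.rand(batch_size, seq_len).argsort(dim=-1)
mask = batch_randperm < num_token_masked.unsqueeze(-1)

input_ids = torch.where(mask, mask_id, image_tokens)  # model input with masked tokens
labels = torch.where(mask, image_tokens, -100)        # loss computed only on masked positions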
""" This script performs DDIM inversion for video frames using a pre-trained model and generates a video reconstruction based on a provided prompt. It utilizes the CogVideoX pipeline to process video frames, apply the DDIM inverse scheduler, and produce an output video. **Please notice that this script is based on the CogVideoX 5B model, and would not generate a good result for 2B variants.** Usage: python cogvideox_ddim_inversion.py --model-path /path/to/model --prompt "a prompt" --video-path /path/to/video.mp4 --output-path /path/to/output For more details about the cli arguments, please run `python cogvideox_ddim_inversion.py --help`. Author: LittleNyima <littlenyima[at]163[dot]com> """ import argparse import math import os from typing import Any, Dict, List, Optional, Tuple, TypedDict, Union, cast import torch import torch.nn.functional as F import torchvision.transforms as T from transformers import T5EncoderModel, T5Tokenizer from diffusers.models.attention_processor import Attention, CogVideoXAttnProcessor2_0 from diffusers.models.autoencoders import AutoencoderKLCogVideoX from diffusers.models.embeddings import apply_rotary_emb from diffusers.models.transformers.cogvideox_transformer_3d import CogVideoXBlock, CogVideoXTransformer3DModel from diffusers.pipelines.cogvideo.pipeline_cogvideox import CogVideoXPipeline, retrieve_timesteps from diffusers.schedulers import CogVideoXDDIMScheduler, DDIMInverseScheduler from diffusers.utils import export_to_video # Must import after torch because this can sometimes lead to a nasty segmentation fault, or stack smashing error. # Very few bug reports but it happens. Look in decord Github issues for more relevant information. import decord # isort: skip class DDIMInversionArguments(TypedDict): model_path: str prompt: str video_path: str output_path: str guidance_scale: float num_inference_steps: int skip_frames_start: int skip_frames_end: int frame_sample_step: Optional[int] max_num_frames: int width: int height: int fps: int dtype: torch.dtype seed: int device: torch.device def get_args() -> DDIMInversionArguments: parser = argparse.ArgumentParser() parser.add_argument("--model_path", type=str, required=True, help="Path of the pretrained model") parser.add_argument("--prompt", type=str, required=True, help="Prompt for the direct sample procedure") parser.add_argument("--video_path", type=str, required=True, help="Path of the video for inversion") parser.add_argument("--output_path", type=str, default="output", help="Path of the output videos") parser.add_argument("--guidance_scale", type=float, default=6.0, help="Classifier-free guidance scale") parser.add_argument("--num_inference_steps", type=int, default=50, help="Number of inference steps") parser.add_argument("--skip_frames_start", type=int, default=0, help="Number of skipped frames from the start") parser.add_argument("--skip_frames_end", type=int, default=0, help="Number of skipped frames from the end") parser.add_argument("--frame_sample_step", type=int, default=None, help="Temporal stride of the sampled frames") parser.add_argument("--max_num_frames", type=int, default=81, help="Max number of sampled frames") parser.add_argument("--width", type=int, default=720, help="Resized width of the video frames") parser.add_argument("--height", type=int, default=480, help="Resized height of the video frames") parser.add_argument("--fps", type=int, default=8, help="Frame rate of the output videos") parser.add_argument("--dtype", type=str, default="bf16", choices=["bf16", "fp16"], help="Dtype of 
the model") parser.add_argument("--seed", type=int, default=42, help="Seed for the random number generator") parser.add_argument("--device", type=str, default="cuda", choices=["cuda", "cpu"], help="Device for inference") args = parser.parse_args() args.dtype = torch.bfloat16 if args.dtype == "bf16" else torch.float16 args.device = torch.device(args.device) return DDIMInversionArguments(**vars(args)) class CogVideoXAttnProcessor2_0ForDDIMInversion(CogVideoXAttnProcessor2_0): def __init__(self): super().__init__() def calculate_attention( self, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attn: Attention, batch_size: int, image_seq_length: int, text_seq_length: int, attention_mask: Optional[torch.Tensor], image_rotary_emb: Optional[torch.Tensor], ) -> Tuple[torch.Tensor, torch.Tensor]: r""" Core attention computation with inversion-guided RoPE integration. Args: query (`torch.Tensor`): `[batch_size, seq_len, dim]` query tensor key (`torch.Tensor`): `[batch_size, seq_len, dim]` key tensor value (`torch.Tensor`): `[batch_size, seq_len, dim]` value tensor attn (`Attention`): Parent attention module with projection layers batch_size (`int`): Effective batch size (after chunk splitting) image_seq_length (`int`): Length of image feature sequence text_seq_length (`int`): Length of text feature sequence attention_mask (`Optional[torch.Tensor]`): Attention mask tensor image_rotary_emb (`Optional[torch.Tensor]`): Rotary embeddings for image positions Returns: `Tuple[torch.Tensor, torch.Tensor]`: (1) hidden_states: [batch_size, image_seq_length, dim] processed image features (2) encoder_hidden_states: [batch_size, text_seq_length, dim] processed text features """ inner_dim = key.shape[-1] head_dim = inner_dim // attn.heads query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) if attn.norm_q is not None: query = attn.norm_q(query) if attn.norm_k is not None: key = attn.norm_k(key) # Apply RoPE if needed if image_rotary_emb is not None: query[:, :, text_seq_length:] = apply_rotary_emb(query[:, :, text_seq_length:], image_rotary_emb) if not attn.is_cross_attention: if key.size(2) == query.size(2): # Attention for reference hidden states key[:, :, text_seq_length:] = apply_rotary_emb(key[:, :, text_seq_length:], image_rotary_emb) else: # RoPE should be applied to each group of image tokens key[:, :, text_seq_length : text_seq_length + image_seq_length] = apply_rotary_emb( key[:, :, text_seq_length : text_seq_length + image_seq_length], image_rotary_emb ) key[:, :, text_seq_length * 2 + image_seq_length :] = apply_rotary_emb( key[:, :, text_seq_length * 2 + image_seq_length :], image_rotary_emb ) hidden_states = F.scaled_dot_product_attention( query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False ) hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim) # linear proj hidden_states = attn.to_out[0](hidden_states) # dropout hidden_states = attn.to_out[1](hidden_states) encoder_hidden_states, hidden_states = hidden_states.split( [text_seq_length, hidden_states.size(1) - text_seq_length], dim=1 ) return hidden_states, encoder_hidden_states def __call__( self, attn: Attention, hidden_states: torch.Tensor, encoder_hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, image_rotary_emb: Optional[torch.Tensor] = None, ) -> Tuple[torch.Tensor, torch.Tensor]: r""" 
Process the dual-path attention for the inversion-guided denoising procedure. Args: attn (`Attention`): Parent attention module hidden_states (`torch.Tensor`): `[batch_size, image_seq_len, dim]` Image tokens encoder_hidden_states (`torch.Tensor`): `[batch_size, text_seq_len, dim]` Text tokens attention_mask (`Optional[torch.Tensor]`): Optional attention mask image_rotary_emb (`Optional[torch.Tensor]`): Rotary embeddings for image tokens Returns: `Tuple[torch.Tensor, torch.Tensor]`: (1) Final hidden states: `[batch_size, image_seq_length, dim]` Resulting image tokens (2) Final encoder states: `[batch_size, text_seq_length, dim]` Resulting text tokens """ image_seq_length = hidden_states.size(1) text_seq_length = encoder_hidden_states.size(1) hidden_states = torch.cat([encoder_hidden_states, hidden_states], dim=1) batch_size, sequence_length, _ = ( hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape ) if attention_mask is not None: attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1]) query = attn.to_q(hidden_states) key = attn.to_k(hidden_states) value = attn.to_v(hidden_states) query, query_reference = query.chunk(2) key, key_reference = key.chunk(2) value, value_reference = value.chunk(2) batch_size = batch_size // 2 hidden_states, encoder_hidden_states = self.calculate_attention( query=query, key=torch.cat((key, key_reference), dim=1), value=torch.cat((value, value_reference), dim=1), attn=attn, batch_size=batch_size, image_seq_length=image_seq_length, text_seq_length=text_seq_length, attention_mask=attention_mask, image_rotary_emb=image_rotary_emb, ) hidden_states_reference, encoder_hidden_states_reference = self.calculate_attention( query=query_reference, key=key_reference, value=value_reference, attn=attn, batch_size=batch_size, image_seq_length=image_seq_length, text_seq_length=text_seq_length, attention_mask=attention_mask, image_rotary_emb=image_rotary_emb, ) return ( torch.cat((hidden_states, hidden_states_reference)), torch.cat((encoder_hidden_states, encoder_hidden_states_reference)), ) class OverrideAttnProcessors: r""" Context manager for temporarily overriding attention processors in CogVideo transformer blocks. Designed for DDIM inversion process, replaces original attention processors with `CogVideoXAttnProcessor2_0ForDDIMInversion` and restores them upon exit. Uses Python context manager pattern to safely manage processor replacement. Typical usage: ```python with OverrideAttnProcessors(transformer): # Perform DDIM inversion operations ``` Args: transformer (`CogVideoXTransformer3DModel`): The transformer model containing attention blocks to be modified. Should have `transformer_blocks` attribute containing `CogVideoXBlock` instances. 
""" def __init__(self, transformer: CogVideoXTransformer3DModel): self.transformer = transformer self.original_processors = {} def __enter__(self): for block in self.transformer.transformer_blocks: block = cast(CogVideoXBlock, block) self.original_processors[id(block)] = block.attn1.get_processor() block.attn1.set_processor(CogVideoXAttnProcessor2_0ForDDIMInversion()) def __exit__(self, _0, _1, _2): for block in self.transformer.transformer_blocks: block = cast(CogVideoXBlock, block) block.attn1.set_processor(self.original_processors[id(block)]) def get_video_frames( video_path: str, width: int, height: int, skip_frames_start: int, skip_frames_end: int, max_num_frames: int, frame_sample_step: Optional[int], ) -> torch.FloatTensor: """ Extract and preprocess video frames from a video file for VAE processing. Args: video_path (`str`): Path to input video file width (`int`): Target frame width for decoding height (`int`): Target frame height for decoding skip_frames_start (`int`): Number of frames to skip at video start skip_frames_end (`int`): Number of frames to skip at video end max_num_frames (`int`): Maximum allowed number of output frames frame_sample_step (`Optional[int]`): Frame sampling step size. If None, automatically calculated as: (total_frames - skipped_frames) // max_num_frames Returns: `torch.FloatTensor`: Preprocessed frames in `[F, C, H, W]` format where: - `F`: Number of frames (adjusted to 4k + 1 for VAE compatibility) - `C`: Channels (3 for RGB) - `H`: Frame height - `W`: Frame width """ with decord.bridge.use_torch(): video_reader = decord.VideoReader(uri=video_path, width=width, height=height) video_num_frames = len(video_reader) start_frame = min(skip_frames_start, video_num_frames) end_frame = max(0, video_num_frames - skip_frames_end) if end_frame <= start_frame: indices = [start_frame] elif end_frame - start_frame <= max_num_frames: indices = list(range(start_frame, end_frame)) else: step = frame_sample_step or (end_frame - start_frame) // max_num_frames indices = list(range(start_frame, end_frame, step)) frames = video_reader.get_batch(indices=indices) frames = frames[:max_num_frames].float() # ensure that we don't go over the limit # Choose first (4k + 1) frames as this is how many is required by the VAE selected_num_frames = frames.size(0) remainder = (3 + selected_num_frames) % 4 if remainder != 0: frames = frames[:-remainder] assert frames.size(0) % 4 == 1 # Normalize the frames transform = T.Lambda(lambda x: x / 255.0 * 2.0 - 1.0) frames = torch.stack(tuple(map(transform, frames)), dim=0) return frames.permute(0, 3, 1, 2).contiguous() # [F, C, H, W] class CogVideoXDDIMInversionOutput: inverse_latents: torch.FloatTensor recon_latents: torch.FloatTensor def __init__(self, inverse_latents: torch.FloatTensor, recon_latents: torch.FloatTensor): self.inverse_latents = inverse_latents self.recon_latents = recon_latents class CogVideoXPipelineForDDIMInversion(CogVideoXPipeline): def __init__( self, tokenizer: T5Tokenizer, text_encoder: T5EncoderModel, vae: AutoencoderKLCogVideoX, transformer: CogVideoXTransformer3DModel, scheduler: CogVideoXDDIMScheduler, ): super().__init__( tokenizer=tokenizer, text_encoder=text_encoder, vae=vae, transformer=transformer, scheduler=scheduler, ) self.inverse_scheduler = DDIMInverseScheduler(**scheduler.config) def encode_video_frames(self, video_frames: torch.FloatTensor) -> torch.FloatTensor: """ Encode video frames into latent space using Variational Autoencoder. 
Args: video_frames (`torch.FloatTensor`): Input frames tensor in `[F, C, H, W]` format from `get_video_frames()` Returns: `torch.FloatTensor`: Encoded latents in `[1, F, D, H_latent, W_latent]` format where: - `F`: Number of frames (same as input) - `D`: Latent channel dimension - `H_latent`: Latent space height (H // 2^vae.downscale_factor) - `W_latent`: Latent space width (W // 2^vae.downscale_factor) """ vae: AutoencoderKLCogVideoX = self.vae video_frames = video_frames.to(device=vae.device, dtype=vae.dtype) video_frames = video_frames.unsqueeze(0).permute(0, 2, 1, 3, 4) # [B, C, F, H, W] latent_dist = vae.encode(x=video_frames).latent_dist.sample().transpose(1, 2) return latent_dist * vae.config.scaling_factor @torch.no_grad() def export_latents_to_video(self, latents: torch.FloatTensor, video_path: str, fps: int): r""" Decode latent vectors into video and export as video file. Args: latents (`torch.FloatTensor`): Encoded latents in `[B, F, D, H_latent, W_latent]` format from `encode_video_frames()` video_path (`str`): Output path for video file fps (`int`): Target frames per second for output video """ video = self.decode_latents(latents) frames = self.video_processor.postprocess_video(video=video, output_type="pil") os.makedirs(os.path.dirname(video_path), exist_ok=True) export_to_video(video_frames=frames[0], output_video_path=video_path, fps=fps) # Modified from CogVideoXPipeline.__call__ @torch.no_grad() def sample( self, latents: torch.FloatTensor, scheduler: Union[DDIMInverseScheduler, CogVideoXDDIMScheduler], prompt: Optional[Union[str, List[str]]] = None, negative_prompt: Optional[Union[str, List[str]]] = None, num_inference_steps: int = 50, guidance_scale: float = 6, use_dynamic_cfg: bool = False, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, attention_kwargs: Optional[Dict[str, Any]] = None, reference_latents: torch.FloatTensor = None, ) -> torch.FloatTensor: r""" Execute the core sampling loop for video generation/inversion using CogVideoX. Implements the full denoising trajectory recording for both DDIM inversion and generation processes. Supports dynamic classifier-free guidance and reference latent conditioning. Args: latents (`torch.FloatTensor`): Initial noise tensor of shape `[B, F, C, H, W]`. scheduler (`Union[DDIMInverseScheduler, CogVideoXDDIMScheduler]`): Scheduling strategy for diffusion process. Use: (1) `DDIMInverseScheduler` for inversion (2) `CogVideoXDDIMScheduler` for generation prompt (`Optional[Union[str, List[str]]]`): Text prompt(s) for conditional generation. Defaults to unconditional. negative_prompt (`Optional[Union[str, List[str]]]`): Negative prompt(s) for guidance. Requires `guidance_scale > 1`. num_inference_steps (`int`): Number of denoising steps. Affects quality/compute trade-off. guidance_scale (`float`): Classifier-free guidance weight. 1.0 = no guidance. use_dynamic_cfg (`bool`): Enable time-varying guidance scale (cosine schedule) eta (`float`): DDIM variance parameter (0 = deterministic process) generator (`Optional[Union[torch.Generator, List[torch.Generator]]]`): Random number generator(s) for reproducibility attention_kwargs (`Optional[Dict[str, Any]]`): Custom parameters for attention modules reference_latents (`torch.FloatTensor`): Reference latent trajectory for conditional sampling. Shape should match `[T, B, F, C, H, W]` where `T` is number of timesteps Returns: `torch.FloatTensor`: Full denoising trajectory tensor of shape `[T, B, F, C, H, W]`. 
""" self._guidance_scale = guidance_scale self._attention_kwargs = attention_kwargs self._interrupt = False device = self._execution_device # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://huggingface.co/papers/2205.11487 . `guidance_scale = 1` # corresponds to doing no classifier free guidance. do_classifier_free_guidance = guidance_scale > 1.0 # 3. Encode input prompt prompt_embeds, negative_prompt_embeds = self.encode_prompt( prompt, negative_prompt, do_classifier_free_guidance, device=device, ) if do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) if reference_latents is not None: prompt_embeds = torch.cat([prompt_embeds] * 2, dim=0) # 4. Prepare timesteps timesteps, num_inference_steps = retrieve_timesteps(scheduler, num_inference_steps, device) self._num_timesteps = len(timesteps) # 5. Prepare latents. latents = latents.to(device=device) * scheduler.init_noise_sigma # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) if isinstance(scheduler, DDIMInverseScheduler): # Inverse scheduler does not accept extra kwargs extra_step_kwargs = {} # 7. Create rotary embeds if required image_rotary_emb = ( self._prepare_rotary_positional_embeddings( height=latents.size(3) * self.vae_scale_factor_spatial, width=latents.size(4) * self.vae_scale_factor_spatial, num_frames=latents.size(1), device=device, ) if self.transformer.config.use_rotary_positional_embeddings else None ) # 8. Denoising loop num_warmup_steps = max(len(timesteps) - num_inference_steps * scheduler.order, 0) trajectory = torch.zeros_like(latents).unsqueeze(0).repeat(len(timesteps), 1, 1, 1, 1, 1) with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): if self.interrupt: continue latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents if reference_latents is not None: reference = reference_latents[i] reference = torch.cat([reference] * 2) if do_classifier_free_guidance else reference latent_model_input = torch.cat([latent_model_input, reference], dim=0) latent_model_input = scheduler.scale_model_input(latent_model_input, t) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML timestep = t.expand(latent_model_input.shape[0]) # predict noise model_output noise_pred = self.transformer( hidden_states=latent_model_input, encoder_hidden_states=prompt_embeds, timestep=timestep, image_rotary_emb=image_rotary_emb, attention_kwargs=attention_kwargs, return_dict=False, )[0] noise_pred = noise_pred.float() if reference_latents is not None: # Recover the original batch size noise_pred, _ = noise_pred.chunk(2) # perform guidance if use_dynamic_cfg: self._guidance_scale = 1 + guidance_scale * ( (1 - math.cos(math.pi * ((num_inference_steps - t.item()) / num_inference_steps) ** 5.0)) / 2 ) if do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the noisy sample x_t-1 -> x_t latents = scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] latents = latents.to(prompt_embeds.dtype) trajectory[i] = latents if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % scheduler.order == 0): progress_bar.update() # Offload all models self.maybe_free_model_hooks() 
return trajectory @torch.no_grad() def __call__( self, prompt: str, video_path: str, guidance_scale: float, num_inference_steps: int, skip_frames_start: int, skip_frames_end: int, frame_sample_step: Optional[int], max_num_frames: int, width: int, height: int, seed: int, ): """ Performs DDIM inversion on a video to reconstruct it with a new prompt. Args: prompt (`str`): The text prompt to guide the reconstruction. video_path (`str`): Path to the input video file. guidance_scale (`float`): Scale for classifier-free guidance. num_inference_steps (`int`): Number of denoising steps. skip_frames_start (`int`): Number of frames to skip from the beginning of the video. skip_frames_end (`int`): Number of frames to skip from the end of the video. frame_sample_step (`Optional[int]`): Step size for sampling frames. If None, all frames are used. max_num_frames (`int`): Maximum number of frames to process. width (`int`): Width of the output video frames. height (`int`): Height of the output video frames. seed (`int`): Random seed for reproducibility. Returns: `CogVideoXDDIMInversionOutput`: Contains the inverse latents and reconstructed latents. """ if not self.transformer.config.use_rotary_positional_embeddings: raise NotImplementedError("This script supports CogVideoX 5B model only.") video_frames = get_video_frames( video_path=video_path, width=width, height=height, skip_frames_start=skip_frames_start, skip_frames_end=skip_frames_end, max_num_frames=max_num_frames, frame_sample_step=frame_sample_step, ).to(device=self.device) video_latents = self.encode_video_frames(video_frames=video_frames) inverse_latents = self.sample( latents=video_latents, scheduler=self.inverse_scheduler, prompt="", num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, generator=torch.Generator(device=self.device).manual_seed(seed), ) with OverrideAttnProcessors(transformer=self.transformer): recon_latents = self.sample( latents=torch.randn_like(video_latents), scheduler=self.scheduler, prompt=prompt, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, generator=torch.Generator(device=self.device).manual_seed(seed), reference_latents=reversed(inverse_latents), ) return CogVideoXDDIMInversionOutput( inverse_latents=inverse_latents, recon_latents=recon_latents, ) if __name__ == "__main__": arguments = get_args() pipeline = CogVideoXPipelineForDDIMInversion.from_pretrained( arguments.pop("model_path"), torch_dtype=arguments.pop("dtype"), ).to(device=arguments.pop("device")) output_path = arguments.pop("output_path") fps = arguments.pop("fps") inverse_video_path = os.path.join(output_path, f"{arguments.get('video_path')}_inversion.mp4") recon_video_path = os.path.join(output_path, f"{arguments.get('video_path')}_reconstruction.mp4") # Run DDIM inversion output = pipeline(**arguments) pipeline.export_latents_to_video(output.inverse_latents[-1], inverse_video_path, fps) pipeline.export_latents_to_video(output.recon_latents[-1], recon_video_path, fps)
diffusers/examples/community/cogvideox_ddim_inversion.py/0
{ "file_path": "diffusers/examples/community/cogvideox_ddim_inversion.py", "repo_id": "diffusers", "token_count": 12424 }
139
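A hedged sketch of driving the DDIM-inversion pipeline defined in cogvideox_ddim_inversion.py above from Python rather than through its CLI, mirroring what the script's __main__ block does. The import path, checkpoint id, prompt, and file paths are assumptions for illustration, not values prescribed by the script.

import torch

# Assumes the script above is importable as a module on the Python path.
from cogvideox_ddim_inversion import CogVideoXPipelineForDDIMInversion

# Hypothetical checkpoint id; the script targets CogVideoX 5B-class models.
pipeline = CogVideoXPipelineForDDIMInversion.from_pretrained(
    "THUDM/CogVideoX-5b", torch_dtype=torch.bfloat16
).to("cuda")

output = pipeline(
    prompt="a red car driving on a rainy street",  # placeholder prompt
    video_path="input.mp4",                        # placeholder input video
    guidance_scale=6.0,
    num_inference_steps=50,
    skip_frames_start=0,
    skip_frames_end=0,
    frame_sample_step=None,
    max_num_frames=81,
    width=720,
    height=480,
    seed=42,
)

# output.inverse_latents holds the inversion trajectory; recon_latents the guided reconstruction.
pipeline.export_latents_to_video(output.inverse_latents[-1], "output/inversion.mp4", fps=8)
pipeline.export_latents_to_video(output.recon_latents[-1], "output/reconstruction.mp4", fps=8)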
import inspect from typing import Any, Callable, Dict, List, Optional, Union import numpy as np import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from diffusers.image_processor import VaeImageProcessor from diffusers.loaders import FromSingleFileMixin, StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin from diffusers.models import AutoencoderKL, UNet2DConditionModel from diffusers.models.lora import adjust_lora_scale_text_encoder from diffusers.pipelines.pipeline_utils import DiffusionPipeline, StableDiffusionMixin from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput, StableDiffusionSafetyChecker from diffusers.schedulers import LCMScheduler from diffusers.utils import ( USE_PEFT_BACKEND, deprecate, logging, replace_example_docstring, scale_lora_layers, unscale_lora_layers, ) from diffusers.utils.torch_utils import randn_tensor logger = logging.get_logger(__name__) # pylint: disable=invalid-name EXAMPLE_DOC_STRING = """ Examples: ```py >>> import torch >>> import numpy as np >>> from diffusers import DiffusionPipeline >>> pipe = DiffusionPipeline.from_pretrained("SimianLuo/LCM_Dreamshaper_v7", custom_pipeline="latent_consistency_interpolate") >>> # To save GPU memory, torch.float16 can be used, but it may compromise image quality. >>> pipe.to(torch_device="cuda", torch_dtype=torch.float32) >>> prompts = ["A cat", "A dog", "A horse"] >>> num_inference_steps = 4 >>> num_interpolation_steps = 24 >>> seed = 1337 >>> torch.manual_seed(seed) >>> np.random.seed(seed) >>> images = pipe( prompt=prompts, height=512, width=512, num_inference_steps=num_inference_steps, num_interpolation_steps=num_interpolation_steps, guidance_scale=8.0, embedding_interpolation_type="lerp", latent_interpolation_type="slerp", process_batch_size=4, # Make it higher or lower based on your GPU memory generator=torch.Generator(seed), ) >>> # Save the images as a video >>> import imageio >>> from PIL import Image >>> def pil_to_video(images: List[Image.Image], filename: str, fps: int = 60) -> None: frames = [np.array(image) for image in images] with imageio.get_writer(filename, fps=fps) as video_writer: for frame in frames: video_writer.append_data(frame) >>> pil_to_video(images, "lcm_interpolate.mp4", fps=24) ``` """ def lerp( v0: Union[torch.Tensor, np.ndarray], v1: Union[torch.Tensor, np.ndarray], t: Union[float, torch.Tensor, np.ndarray], ) -> Union[torch.Tensor, np.ndarray]: """ Linearly interpolate between two vectors/tensors. Args: v0 (`torch.Tensor` or `np.ndarray`): First vector/tensor. v1 (`torch.Tensor` or `np.ndarray`): Second vector/tensor. t: (`float`, `torch.Tensor`, or `np.ndarray`): Interpolation factor. If float, must be between 0 and 1. If np.ndarray or torch.Tensor, must be one dimensional with values between 0 and 1. Returns: Union[torch.Tensor, np.ndarray] Interpolated vector/tensor between v0 and v1. """ inputs_are_torch = False t_is_float = False if isinstance(v0, torch.Tensor): inputs_are_torch = True input_device = v0.device v0 = v0.cpu().numpy() v1 = v1.cpu().numpy() if isinstance(t, torch.Tensor): inputs_are_torch = True input_device = t.device t = t.cpu().numpy() elif isinstance(t, float): t_is_float = True t = np.array([t]) t = t[..., None] v0 = v0[None, ...] v1 = v1[None, ...] 
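    # Interpolate with broadcasting; for scalar t, the leading axis added above is squeezed back out below so the output matches the input shape.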
v2 = (1 - t) * v0 + t * v1 if t_is_float and v0.ndim > 1: assert v2.shape[0] == 1 v2 = np.squeeze(v2, axis=0) if inputs_are_torch: v2 = torch.from_numpy(v2).to(input_device) return v2 def slerp( v0: Union[torch.Tensor, np.ndarray], v1: Union[torch.Tensor, np.ndarray], t: Union[float, torch.Tensor, np.ndarray], DOT_THRESHOLD=0.9995, ) -> Union[torch.Tensor, np.ndarray]: """ Spherical linear interpolation between two vectors/tensors. Args: v0 (`torch.Tensor` or `np.ndarray`): First vector/tensor. v1 (`torch.Tensor` or `np.ndarray`): Second vector/tensor. t: (`float`, `torch.Tensor`, or `np.ndarray`): Interpolation factor. If float, must be between 0 and 1. If np.ndarray or torch.Tensor, must be one dimensional with values between 0 and 1. DOT_THRESHOLD (`float`, *optional*, default=0.9995): Threshold for when to use linear interpolation instead of spherical interpolation. Returns: `torch.Tensor` or `np.ndarray`: Interpolated vector/tensor between v0 and v1. """ inputs_are_torch = False t_is_float = False if isinstance(v0, torch.Tensor): inputs_are_torch = True input_device = v0.device v0 = v0.cpu().numpy() v1 = v1.cpu().numpy() if isinstance(t, torch.Tensor): inputs_are_torch = True input_device = t.device t = t.cpu().numpy() elif isinstance(t, float): t_is_float = True t = np.array([t], dtype=v0.dtype) dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1))) if np.abs(dot) > DOT_THRESHOLD: # v1 and v2 are close to parallel # Use linear interpolation instead v2 = lerp(v0, v1, t) else: theta_0 = np.arccos(dot) sin_theta_0 = np.sin(theta_0) theta_t = theta_0 * t sin_theta_t = np.sin(theta_t) s0 = np.sin(theta_0 - theta_t) / sin_theta_0 s1 = sin_theta_t / sin_theta_0 s0 = s0[..., None] s1 = s1[..., None] v0 = v0[None, ...] v1 = v1[None, ...] v2 = s0 * v0 + s1 * v1 if t_is_float and v0.ndim > 1: assert v2.shape[0] == 1 v2 = np.squeeze(v2, axis=0) if inputs_are_torch: v2 = torch.from_numpy(v2).to(input_device) return v2 class LatentConsistencyModelWalkPipeline( DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, StableDiffusionLoraLoaderMixin, FromSingleFileMixin, ): r""" Pipeline for text-to-image generation using a latent consistency model. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.). The pipeline also inherits the following loading methods: - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings - [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`] for loading LoRA weights - [`~loaders.StableDiffusionLoraLoaderMixin.save_lora_weights`] for saving LoRA weights - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files Args: vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. text_encoder ([`~transformers.CLIPTextModel`]): Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). tokenizer ([`~transformers.CLIPTokenizer`]): A `CLIPTokenizer` to tokenize text. unet ([`UNet2DConditionModel`]): A `UNet2DConditionModel` to denoise the encoded image latents. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded image latents. Currently only supports [`LCMScheduler`]. 
safety_checker ([`StableDiffusionSafetyChecker`]): Classification module that estimates whether generated images could be considered offensive or harmful. Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details about a model's potential harms. feature_extractor ([`~transformers.CLIPImageProcessor`]): A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. requires_safety_checker (`bool`, *optional*, defaults to `True`): Whether the pipeline requires a safety checker component. """ model_cpu_offload_seq = "text_encoder->unet->vae" _optional_components = ["safety_checker", "feature_extractor"] _exclude_from_cpu_offload = ["safety_checker"] _callback_tensor_inputs = ["latents", "denoised", "prompt_embeds", "w_embedding"] def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: LCMScheduler, safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, requires_safety_checker: bool = True, ): super().__init__() if safety_checker is None and requires_safety_checker: logger.warning( f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" " results in services or applications open to the public. Both the diffusers team and Hugging Face" " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" " it only for use-cases that involve analyzing network behavior or auditing its results. For more" " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." ) if safety_checker is not None and feature_extractor is None: raise ValueError( "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." ) self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, ) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8 self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) self.register_to_config(requires_safety_checker=requires_safety_checker) # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt def encode_prompt( self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, lora_scale: Optional[float] = None, clip_skip: Optional[int] = None, ): r""" Encodes the prompt into text encoder hidden states. Args: prompt (`str` or `List[str]`, *optional*): prompt to be encoded device: (`torch.device`): torch device num_images_per_prompt (`int`): number of images that should be generated per prompt do_classifier_free_guidance (`bool`): whether to use classifier free guidance or not negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). 
prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. lora_scale (`float`, *optional*): A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. clip_skip (`int`, *optional*): Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that the output of the pre-final layer will be used for computing the prompt embeddings. """ # set lora scale so that monkey patched LoRA # function of text encoder can correctly access it if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin): self._lora_scale = lora_scale # dynamically adjust the LoRA scale if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) else: scale_lora_layers(self.text_encoder, lora_scale) if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: # textual inversion: process multi-vector tokens if necessary if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, self.tokenizer) text_inputs = self.tokenizer( prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( text_input_ids, untruncated_ids ): removed_text = self.tokenizer.batch_decode( untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] ) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {self.tokenizer.model_max_length} tokens: {removed_text}" ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None if clip_skip is None: prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) prompt_embeds = prompt_embeds[0] else: prompt_embeds = self.text_encoder( text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True ) # Access the `hidden_states` first, that contains a tuple of # all the hidden states from the encoder layers. Then index into # the tuple to access the hidden states from the desired layer. prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] # We also need to apply the final LayerNorm here to not mess with the # representations. The `last_hidden_states` that we typically use for # obtaining the final prompt representations passes through the LayerNorm # layer. 
prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) if self.text_encoder is not None: prompt_embeds_dtype = self.text_encoder.dtype elif self.unet is not None: prompt_embeds_dtype = self.unet.dtype else: prompt_embeds_dtype = prompt_embeds.dtype prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) bs_embed, seq_len, _ = prompt_embeds.shape # duplicate text embeddings for each generation per prompt, using mps friendly method prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [""] * batch_size elif prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." ) elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) else: uncond_tokens = negative_prompt # textual inversion: process multi-vector tokens if necessary if isinstance(self, TextualInversionLoaderMixin): uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) max_length = prompt_embeds.shape[1] uncond_input = self.tokenizer( uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None negative_prompt_embeds = self.text_encoder( uncond_input.input_ids.to(device), attention_mask=attention_mask, ) negative_prompt_embeds = negative_prompt_embeds[0] if do_classifier_free_guidance: # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND: # Retrieve the original scale by scaling back the LoRA layers unscale_lora_layers(self.text_encoder, lora_scale) return prompt_embeds, negative_prompt_embeds # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker def run_safety_checker(self, image, device, dtype): if self.safety_checker is None: has_nsfw_concept = None else: if torch.is_tensor(image): feature_extractor_input = self.image_processor.postprocess(image, output_type="pil") else: feature_extractor_input = self.image_processor.numpy_to_pil(image) safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device) image, has_nsfw_concept = self.safety_checker( images=image, clip_input=safety_checker_input.pixel_values.to(dtype) ) return image, has_nsfw_concept # Copied from 
diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): shape = ( batch_size, num_channels_latents, int(height) // self.vae_scale_factor, int(width) // self.vae_scale_factor, ) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." ) if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device) # scale the initial noise by the standard deviation required by the scheduler latents = latents * self.scheduler.init_noise_sigma return latents def get_guidance_scale_embedding(self, w, embedding_dim=512, dtype=torch.float32): """ See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298 Args: timesteps (`torch.Tensor`): generate embedding vectors at these timesteps embedding_dim (`int`, *optional*, defaults to 512): dimension of the embeddings to generate dtype: data type of the generated embeddings Returns: `torch.Tensor`: Embedding vectors with shape `(len(timesteps), embedding_dim)` """ assert len(w.shape) == 1 w = w * 1000.0 half_dim = embedding_dim // 2 emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1) emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb) emb = w.to(dtype)[:, None] * emb[None, :] emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) if embedding_dim % 2 == 1: # zero pad emb = torch.nn.functional.pad(emb, (0, 1)) assert emb.shape == (w.shape[0], embedding_dim) return emb # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs def prepare_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://huggingface.co/papers/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs # Currently StableDiffusionPipeline.check_inputs with negative prompt stuff removed def check_inputs( self, prompt: Union[str, List[str]], height: int, width: int, callback_steps: int, prompt_embeds: Optional[torch.Tensor] = None, callback_on_step_end_tensor_inputs=None, ): if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." 
) if callback_on_step_end_tensor_inputs is not None and not all( k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs ): raise ValueError( f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" ) if prompt is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt is None and prompt_embeds is None: raise ValueError( "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." ) elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") @torch.no_grad() def interpolate_embedding( self, start_embedding: torch.Tensor, end_embedding: torch.Tensor, num_interpolation_steps: Union[int, List[int]], interpolation_type: str, ) -> torch.Tensor: if interpolation_type == "lerp": interpolation_fn = lerp elif interpolation_type == "slerp": interpolation_fn = slerp else: raise ValueError( f"embedding_interpolation_type must be one of ['lerp', 'slerp'], got {interpolation_type}." ) embedding = torch.cat([start_embedding, end_embedding]) steps = torch.linspace(0, 1, num_interpolation_steps, dtype=embedding.dtype).cpu().numpy() steps = np.expand_dims(steps, axis=tuple(range(1, embedding.ndim))) interpolations = [] # Interpolate between text embeddings # TODO(aryan): Think of a better way of doing this # See if it can be done parallelly instead for i in range(embedding.shape[0] - 1): interpolations.append(interpolation_fn(embedding[i], embedding[i + 1], steps).squeeze(dim=1)) interpolations = torch.cat(interpolations) return interpolations @torch.no_grad() def interpolate_latent( self, start_latent: torch.Tensor, end_latent: torch.Tensor, num_interpolation_steps: Union[int, List[int]], interpolation_type: str, ) -> torch.Tensor: if interpolation_type == "lerp": interpolation_fn = lerp elif interpolation_type == "slerp": interpolation_fn = slerp latent = torch.cat([start_latent, end_latent]) steps = torch.linspace(0, 1, num_interpolation_steps, dtype=latent.dtype).cpu().numpy() steps = np.expand_dims(steps, axis=tuple(range(1, latent.ndim))) interpolations = [] # Interpolate between latents # TODO: Think of a better way of doing this # See if it can be done parallelly instead for i in range(latent.shape[0] - 1): interpolations.append(interpolation_fn(latent[i], latent[i + 1], steps).squeeze(dim=1)) return torch.cat(interpolations) @property def guidance_scale(self): return self._guidance_scale @property def cross_attention_kwargs(self): return self._cross_attention_kwargs @property def clip_skip(self): return self._clip_skip @property def num_timesteps(self): return self._num_timesteps @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__( self, prompt: Union[str, List[str]] = None, height: Optional[int] = None, width: Optional[int] = None, num_inference_steps: int = 4, num_interpolation_steps: int = 8, original_inference_steps: int = None, guidance_scale: float = 8.5, num_images_per_prompt: Optional[int] = 1, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.Tensor] = None, prompt_embeds: Optional[torch.Tensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, 
cross_attention_kwargs: Optional[Dict[str, Any]] = None, clip_skip: Optional[int] = None, callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, callback_on_step_end_tensor_inputs: List[str] = ["latents"], embedding_interpolation_type: str = "lerp", latent_interpolation_type: str = "slerp", process_batch_size: int = 4, **kwargs, ): r""" The call function to the pipeline for generation. Args: prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): The height in pixels of the generated image. width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): The width in pixels of the generated image. num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. original_inference_steps (`int`, *optional*): The original number of inference steps use to generate a linearly-spaced timestep schedule, from which we will draw `num_inference_steps` evenly spaced timesteps from as our final timestep schedule, following the Skipping-Step method in the paper (see Section 4.3). If not set this will default to the scheduler's `original_inference_steps` attribute. guidance_scale (`float`, *optional*, defaults to 7.5): A higher guidance scale value encourages the model to generate images closely linked to the text `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. Note that the original latent consistency models paper uses a different CFG formulation where the guidance scales are decreased by 1 (so in the paper formulation CFG is enabled when `guidance_scale > 0`). num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.Tensor`, *optional*): Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor is generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not provided, text embeddings are generated from the `prompt` input argument. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generated image. Choose between `PIL.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a plain tuple. cross_attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). clip_skip (`int`, *optional*): Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that the output of the pre-final layer will be used for computing the prompt embeddings. 
callback_on_step_end (`Callable`, *optional*): A function that calls at the end of each denoising steps during the inference. The function is called with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by `callback_on_step_end_tensor_inputs`. callback_on_step_end_tensor_inputs (`List`, *optional*): The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the `._callback_tensor_inputs` attribute of your pipeline class. embedding_interpolation_type (`str`, *optional*, defaults to `"lerp"`): The type of interpolation to use for interpolating between text embeddings. Choose between `"lerp"` and `"slerp"`. latent_interpolation_type (`str`, *optional*, defaults to `"slerp"`): The type of interpolation to use for interpolating between latents. Choose between `"lerp"` and `"slerp"`. process_batch_size (`int`, *optional*, defaults to 4): The batch size to use for processing the images. This is useful when generating a large number of images and you want to avoid running out of memory. Examples: Returns: [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, otherwise a `tuple` is returned where the first element is a list with the generated images and the second element is a list of `bool`s indicating whether the corresponding generated image contains "not-safe-for-work" (nsfw) content. """ callback = kwargs.pop("callback", None) callback_steps = kwargs.pop("callback_steps", None) if callback is not None: deprecate( "callback", "1.0.0", "Passing `callback` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`", ) if callback_steps is not None: deprecate( "callback_steps", "1.0.0", "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`", ) # 0. Default height and width to unet height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor # 1. Check inputs. Raise error if not correct self.check_inputs(prompt, height, width, callback_steps, prompt_embeds, callback_on_step_end_tensor_inputs) self._guidance_scale = guidance_scale self._clip_skip = clip_skip self._cross_attention_kwargs = cross_attention_kwargs # 2. 
Define call parameters if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if batch_size < 2: raise ValueError(f"`prompt` must have length of at least 2 but found {batch_size}") if num_images_per_prompt != 1: raise ValueError("`num_images_per_prompt` must be `1` as no other value is supported yet") if prompt_embeds is not None: raise ValueError("`prompt_embeds` must be None since it is not supported yet") if latents is not None: raise ValueError("`latents` must be None since it is not supported yet") device = self._execution_device # do_classifier_free_guidance = guidance_scale > 1.0 lora_scale = ( self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None ) self.scheduler.set_timesteps(num_inference_steps, device, original_inference_steps=original_inference_steps) timesteps = self.scheduler.timesteps num_channels_latents = self.unet.config.in_channels # bs = batch_size * num_images_per_prompt # 3. Encode initial input prompt prompt_embeds_1, _ = self.encode_prompt( prompt[:1], device, num_images_per_prompt=num_images_per_prompt, do_classifier_free_guidance=False, negative_prompt=None, prompt_embeds=prompt_embeds, negative_prompt_embeds=None, lora_scale=lora_scale, clip_skip=self.clip_skip, ) # 4. Prepare initial latent variables latents_1 = self.prepare_latents( 1, num_channels_latents, height, width, prompt_embeds_1.dtype, device, generator, latents, ) extra_step_kwargs = self.prepare_extra_step_kwargs(generator, None) num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order self._num_timesteps = len(timesteps) images = [] # 5. Iterate over prompts and perform latent walk. Note that we do this two prompts at a time # otherwise the memory usage ends up being too high. with self.progress_bar(total=batch_size - 1) as prompt_progress_bar: for i in range(1, batch_size): # 6. Encode current prompt prompt_embeds_2, _ = self.encode_prompt( prompt[i : i + 1], device, num_images_per_prompt=num_images_per_prompt, do_classifier_free_guidance=False, negative_prompt=None, prompt_embeds=prompt_embeds, negative_prompt_embeds=None, lora_scale=lora_scale, clip_skip=self.clip_skip, ) # 7. Prepare current latent variables latents_2 = self.prepare_latents( 1, num_channels_latents, height, width, prompt_embeds_2.dtype, device, generator, latents, ) # 8. Interpolate between previous and current prompt embeddings and latents inference_embeddings = self.interpolate_embedding( start_embedding=prompt_embeds_1, end_embedding=prompt_embeds_2, num_interpolation_steps=num_interpolation_steps, interpolation_type=embedding_interpolation_type, ) inference_latents = self.interpolate_latent( start_latent=latents_1, end_latent=latents_2, num_interpolation_steps=num_interpolation_steps, interpolation_type=latent_interpolation_type, ) next_prompt_embeds = inference_embeddings[-1:].detach().clone() next_latents = inference_latents[-1:].detach().clone() bs = num_interpolation_steps # 9. Perform inference in batches. Note the use of `process_batch_size` to control the batch size # of the inference. This is useful for reducing memory usage and can be configured based on the # available GPU memory. 
with self.progress_bar( total=(bs + process_batch_size - 1) // process_batch_size ) as batch_progress_bar: for batch_index in range(0, bs, process_batch_size): batch_inference_latents = inference_latents[batch_index : batch_index + process_batch_size] batch_inference_embeddings = inference_embeddings[ batch_index : batch_index + process_batch_size ] self.scheduler.set_timesteps( num_inference_steps, device, original_inference_steps=original_inference_steps ) timesteps = self.scheduler.timesteps current_bs = batch_inference_embeddings.shape[0] w = torch.tensor(self.guidance_scale - 1).repeat(current_bs) w_embedding = self.get_guidance_scale_embedding( w, embedding_dim=self.unet.config.time_cond_proj_dim ).to(device=device, dtype=latents_1.dtype) # 10. Perform inference for current batch with self.progress_bar(total=num_inference_steps) as progress_bar: for index, t in enumerate(timesteps): batch_inference_latents = batch_inference_latents.to(batch_inference_embeddings.dtype) # model prediction (v-prediction, eps, x) model_pred = self.unet( batch_inference_latents, t, timestep_cond=w_embedding, encoder_hidden_states=batch_inference_embeddings, cross_attention_kwargs=self.cross_attention_kwargs, return_dict=False, )[0] # compute the previous noisy sample x_t -> x_t-1 batch_inference_latents, denoised = self.scheduler.step( model_pred, t, batch_inference_latents, **extra_step_kwargs, return_dict=False ) if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, index, t, callback_kwargs) batch_inference_latents = callback_outputs.pop("latents", batch_inference_latents) batch_inference_embeddings = callback_outputs.pop( "prompt_embeds", batch_inference_embeddings ) w_embedding = callback_outputs.pop("w_embedding", w_embedding) denoised = callback_outputs.pop("denoised", denoised) # call the callback, if provided if index == len(timesteps) - 1 or ( (index + 1) > num_warmup_steps and (index + 1) % self.scheduler.order == 0 ): progress_bar.update() if callback is not None and index % callback_steps == 0: step_idx = index // getattr(self.scheduler, "order", 1) callback(step_idx, t, batch_inference_latents) denoised = denoised.to(batch_inference_embeddings.dtype) # Note: This is not supported because you would get black images in your latent walk if # NSFW concept is detected # if not output_type == "latent": # image = self.vae.decode(denoised / self.vae.config.scaling_factor, return_dict=False)[0] # image, has_nsfw_concept = self.run_safety_checker(image, device, inference_embeddings.dtype) # else: # image = denoised # has_nsfw_concept = None # if has_nsfw_concept is None: # do_denormalize = [True] * image.shape[0] # else: # do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] image = self.vae.decode(denoised / self.vae.config.scaling_factor, return_dict=False)[0] do_denormalize = [True] * image.shape[0] has_nsfw_concept = None image = self.image_processor.postprocess( image, output_type=output_type, do_denormalize=do_denormalize ) images.append(image) batch_progress_bar.update() prompt_embeds_1 = next_prompt_embeds latents_1 = next_latents prompt_progress_bar.update() # 11. 
Determine what should be returned if output_type == "pil": images = [image for image_list in images for image in image_list] elif output_type == "np": images = np.concatenate(images) elif output_type == "pt": images = torch.cat(images) else: raise ValueError("`output_type` must be one of 'pil', 'np' or 'pt'.") # Offload all models self.maybe_free_model_hooks() if not return_dict: return (images, has_nsfw_concept) return StableDiffusionPipelineOutput(images=images, nsfw_content_detected=has_nsfw_concept)
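

# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original pipeline). The checkpoint id,
# prompts and step counts below are illustrative assumptions; any latent
# consistency checkpoint that ships the standard Stable Diffusion components
# (for example "SimianLuo/LCM_Dreamshaper_v7") should load with this class.
# Note that `prompt` must contain at least two entries, and `process_batch_size`
# can be lowered to trade speed for peak memory.
if __name__ == "__main__":
    pipe = LatentConsistencyModelWalkPipeline.from_pretrained(
        "SimianLuo/LCM_Dreamshaper_v7", torch_dtype=torch.float16
    ).to("cuda")
    frames = pipe(
        prompt=["a photo of a cat", "a photo of a dog"],
        num_inference_steps=4,
        num_interpolation_steps=8,
        process_batch_size=4,  # lower this to reduce memory usage
    ).images
    for i, frame in enumerate(frames):
        frame.save(f"walk_{i:03d}.png")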
diffusers/examples/community/latent_consistency_interpolate.py/0
{ "file_path": "diffusers/examples/community/latent_consistency_interpolate.py", "repo_id": "diffusers", "token_count": 22092 }
140
#!/usr/bin/env python3
import torch

from diffusers import DiffusionPipeline


class UnetSchedulerOneForwardPipeline(DiffusionPipeline):
    """Toy pipeline that runs a single UNet forward pass followed by one scheduler step."""

    def __init__(self, unet, scheduler):
        super().__init__()

        self.register_modules(unet=unet, scheduler=scheduler)

    def __call__(self):
        # Sample pure Gaussian noise with the shape expected by the UNet.
        image = torch.randn(
            (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
        )
        timestep = 1

        # One denoising prediction followed by one scheduler step.
        model_output = self.unet(image, timestep).sample
        scheduler_output = self.scheduler.step(model_output, timestep, image).prev_sample

        # The scheduler output cancels itself out, so the result is simply a tensor of
        # ones with the same shape as the latent, keeping the pipeline deterministic.
        result = scheduler_output - scheduler_output + torch.ones_like(scheduler_output)

        return result
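

# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original file). The checkpoint id is an
# assumption; any repository that exposes compatible `unet` and `scheduler`
# components (for example "google/ddpm-cifar10-32") should work. By construction
# the returned tensor is all ones, because `scheduler_output - scheduler_output`
# cancels out; the pipeline only exercises one UNet forward and one scheduler step.
if __name__ == "__main__":
    pipe = UnetSchedulerOneForwardPipeline.from_pretrained("google/ddpm-cifar10-32")
    output = pipe()
    print(output.shape)  # e.g. torch.Size([1, 3, 32, 32]) for the DDPM CIFAR-10 UNet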
diffusers/examples/community/one_step_unet.py/0
{ "file_path": "diffusers/examples/community/one_step_unet.py", "repo_id": "diffusers", "token_count": 299 }
141
# Copyright 2025 Stability AI, Kwai-Kolors Team and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect from typing import Any, Callable, Dict, List, Optional, Tuple, Union import PIL.Image import torch from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection from diffusers.callbacks import MultiPipelineCallbacks, PipelineCallback from diffusers.image_processor import PipelineImageInput, VaeImageProcessor from diffusers.loaders import IPAdapterMixin, StableDiffusionXLLoraLoaderMixin from diffusers.models import AutoencoderKL, ImageProjection, UNet2DConditionModel from diffusers.models.attention_processor import AttnProcessor2_0, FusedAttnProcessor2_0, XFormersAttnProcessor from diffusers.pipelines.kolors.pipeline_output import KolorsPipelineOutput from diffusers.pipelines.kolors.text_encoder import ChatGLMModel from diffusers.pipelines.kolors.tokenizer import ChatGLMTokenizer from diffusers.pipelines.pipeline_utils import DiffusionPipeline, StableDiffusionMixin from diffusers.schedulers import KarrasDiffusionSchedulers from diffusers.utils import is_torch_xla_available, logging, replace_example_docstring from diffusers.utils.torch_utils import randn_tensor if is_torch_xla_available(): import torch_xla.core.xla_model as xm XLA_AVAILABLE = True else: XLA_AVAILABLE = False logger = logging.get_logger(__name__) # pylint: disable=invalid-name EXAMPLE_DOC_STRING = """ Examples: ```py >>> import torch >>> from diffusers import KolorsDifferentialImg2ImgPipeline >>> from diffusers.utils import load_image >>> pipe = KolorsDifferentialImg2ImgPipeline.from_pretrained( ... "Kwai-Kolors/Kolors-diffusers", variant="fp16", torch_dtype=torch.float16 ... ) >>> pipe = pipe.to("cuda") >>> url = ( ... "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/kolors/bunny_source.png" ... ) >>> init_image = load_image(url) >>> prompt = "high quality image of a capybara wearing sunglasses. In the background of the image there are trees, poles, grass and other objects. At the bottom of the object there is the road., 8k, highly detailed." 
>>> image = pipe(prompt, image=init_image).images[0] ``` """ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents def retrieve_latents( encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample" ): if hasattr(encoder_output, "latent_dist") and sample_mode == "sample": return encoder_output.latent_dist.sample(generator) elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax": return encoder_output.latent_dist.mode() elif hasattr(encoder_output, "latents"): return encoder_output.latents else: raise AttributeError("Could not access latents of provided encoder_output") # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps def retrieve_timesteps( scheduler, num_inference_steps: Optional[int] = None, device: Optional[Union[str, torch.device]] = None, timesteps: Optional[List[int]] = None, sigmas: Optional[List[float]] = None, **kwargs, ): """ Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. Args: scheduler (`SchedulerMixin`): The scheduler to get timesteps from. num_inference_steps (`int`): The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` must be `None`. device (`str` or `torch.device`, *optional*): The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. timesteps (`List[int]`, *optional*): Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed, `num_inference_steps` and `sigmas` must be `None`. sigmas (`List[float]`, *optional*): Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed, `num_inference_steps` and `timesteps` must be `None`. Returns: `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the second element is the number of inference steps. """ if timesteps is not None and sigmas is not None: raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values") if timesteps is not None: accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accepts_timesteps: raise ValueError( f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" f" timestep schedules. Please check whether you are using the correct scheduler." ) scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) elif sigmas is not None: accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accept_sigmas: raise ValueError( f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" f" sigmas schedules. Please check whether you are using the correct scheduler." 
) scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) else: scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) timesteps = scheduler.timesteps return timesteps, num_inference_steps class KolorsDifferentialImg2ImgPipeline( DiffusionPipeline, StableDiffusionMixin, StableDiffusionXLLoraLoaderMixin, IPAdapterMixin ): r""" Pipeline for text-to-image generation using Kolors. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) The pipeline also inherits the following loading methods: - [`~loaders.StableDiffusionXLLoraLoaderMixin.load_lora_weights`] for loading LoRA weights - [`~loaders.StableDiffusionXLLoraLoaderMixin.save_lora_weights`] for saving LoRA weights - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters Args: vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. text_encoder ([`ChatGLMModel`]): Frozen text-encoder. Kolors uses [ChatGLM3-6B](https://huggingface.co/THUDM/chatglm3-6b). tokenizer (`ChatGLMTokenizer`): Tokenizer of class [ChatGLMTokenizer](https://huggingface.co/THUDM/chatglm3-6b/blob/main/tokenization_chatglm.py). unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. force_zeros_for_empty_prompt (`bool`, *optional*, defaults to `"False"`): Whether the negative prompt embeddings shall be forced to always be set to 0. Also see the config of `Kwai-Kolors/Kolors-diffusers`. 
""" model_cpu_offload_seq = "text_encoder->image_encoder-unet->vae" _optional_components = [ "image_encoder", "feature_extractor", ] _callback_tensor_inputs = [ "latents", "prompt_embeds", "negative_prompt_embeds", "add_text_embeds", "add_time_ids", "negative_pooled_prompt_embeds", "negative_add_time_ids", ] def __init__( self, vae: AutoencoderKL, text_encoder: ChatGLMModel, tokenizer: ChatGLMTokenizer, unet: UNet2DConditionModel, scheduler: KarrasDiffusionSchedulers, image_encoder: CLIPVisionModelWithProjection = None, feature_extractor: CLIPImageProcessor = None, force_zeros_for_empty_prompt: bool = False, ): super().__init__() self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, image_encoder=image_encoder, feature_extractor=feature_extractor, ) self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8 self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) self.mask_processor = VaeImageProcessor( vae_scale_factor=self.vae_scale_factor, do_normalize=False, do_convert_grayscale=True ) self.default_sample_size = ( self.unet.config.sample_size if hasattr(self, "unet") and self.unet is not None and hasattr(self.unet.config, "sample_size") else 128 ) # Copied from diffusers.pipelines.kolors.pipeline_kolors.KolorsPipeline.encode_prompt def encode_prompt( self, prompt, device: Optional[torch.device] = None, num_images_per_prompt: int = 1, do_classifier_free_guidance: bool = True, negative_prompt=None, prompt_embeds: Optional[torch.FloatTensor] = None, pooled_prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.FloatTensor] = None, negative_pooled_prompt_embeds: Optional[torch.Tensor] = None, max_sequence_length: int = 256, ): r""" Encodes the prompt into text encoder hidden states. Args: prompt (`str` or `List[str]`, *optional*): prompt to be encoded device: (`torch.device`): torch device num_images_per_prompt (`int`): number of images that should be generated per prompt do_classifier_free_guidance (`bool`): whether to use classifier free guidance or not negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. pooled_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, pooled text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. negative_pooled_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` input argument. 
max_sequence_length (`int` defaults to 256): Maximum sequence length to use with the `prompt`. """ # from IPython import embed; embed(); exit() device = device or self._execution_device if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] # Define tokenizers and text encoders tokenizers = [self.tokenizer] text_encoders = [self.text_encoder] if prompt_embeds is None: prompt_embeds_list = [] for tokenizer, text_encoder in zip(tokenizers, text_encoders): text_inputs = tokenizer( prompt, padding="max_length", max_length=max_sequence_length, truncation=True, return_tensors="pt", ).to(device) output = text_encoder( input_ids=text_inputs["input_ids"], attention_mask=text_inputs["attention_mask"], position_ids=text_inputs["position_ids"], output_hidden_states=True, ) # [max_sequence_length, batch, hidden_size] -> [batch, max_sequence_length, hidden_size] # clone to have a contiguous tensor prompt_embeds = output.hidden_states[-2].permute(1, 0, 2).clone() # [max_sequence_length, batch, hidden_size] -> [batch, hidden_size] pooled_prompt_embeds = output.hidden_states[-1][-1, :, :].clone() bs_embed, seq_len, _ = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) prompt_embeds_list.append(prompt_embeds) prompt_embeds = prompt_embeds_list[0] # get unconditional embeddings for classifier free guidance zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt: negative_prompt_embeds = torch.zeros_like(prompt_embeds) elif do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [""] * batch_size elif prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." ) elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." 
) else: uncond_tokens = negative_prompt negative_prompt_embeds_list = [] for tokenizer, text_encoder in zip(tokenizers, text_encoders): uncond_input = tokenizer( uncond_tokens, padding="max_length", max_length=max_sequence_length, truncation=True, return_tensors="pt", ).to(device) output = text_encoder( input_ids=uncond_input["input_ids"], attention_mask=uncond_input["attention_mask"], position_ids=uncond_input["position_ids"], output_hidden_states=True, ) # [max_sequence_length, batch, hidden_size] -> [batch, max_sequence_length, hidden_size] # clone to have a contiguous tensor negative_prompt_embeds = output.hidden_states[-2].permute(1, 0, 2).clone() # [max_sequence_length, batch, hidden_size] -> [batch, hidden_size] negative_pooled_prompt_embeds = output.hidden_states[-1][-1, :, :].clone() if do_classifier_free_guidance: # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=text_encoder.dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view( batch_size * num_images_per_prompt, seq_len, -1 ) negative_prompt_embeds_list.append(negative_prompt_embeds) negative_prompt_embeds = negative_prompt_embeds_list[0] bs_embed = pooled_prompt_embeds.shape[0] pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( bs_embed * num_images_per_prompt, -1 ) if do_classifier_free_guidance: negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( bs_embed * num_images_per_prompt, -1 ) return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): dtype = next(self.image_encoder.parameters()).dtype if not isinstance(image, torch.Tensor): image = self.feature_extractor(image, return_tensors="pt").pixel_values image = image.to(device=device, dtype=dtype) if output_hidden_states: image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2] image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_enc_hidden_states = self.image_encoder( torch.zeros_like(image), output_hidden_states=True ).hidden_states[-2] uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave( num_images_per_prompt, dim=0 ) return image_enc_hidden_states, uncond_image_enc_hidden_states else: image_embeds = self.image_encoder(image).image_embeds image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_embeds = torch.zeros_like(image_embeds) return image_embeds, uncond_image_embeds # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_ip_adapter_image_embeds def prepare_ip_adapter_image_embeds( self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance ): image_embeds = [] if do_classifier_free_guidance: negative_image_embeds = [] if ip_adapter_image_embeds is None: if not isinstance(ip_adapter_image, list): ip_adapter_image = [ip_adapter_image] if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): raise ValueError( 
f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters." ) for single_ip_adapter_image, image_proj_layer in zip( ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers ): output_hidden_state = not isinstance(image_proj_layer, ImageProjection) single_image_embeds, single_negative_image_embeds = self.encode_image( single_ip_adapter_image, device, 1, output_hidden_state ) image_embeds.append(single_image_embeds[None, :]) if do_classifier_free_guidance: negative_image_embeds.append(single_negative_image_embeds[None, :]) else: for single_image_embeds in ip_adapter_image_embeds: if do_classifier_free_guidance: single_negative_image_embeds, single_image_embeds = single_image_embeds.chunk(2) negative_image_embeds.append(single_negative_image_embeds) image_embeds.append(single_image_embeds) ip_adapter_image_embeds = [] for i, single_image_embeds in enumerate(image_embeds): single_image_embeds = torch.cat([single_image_embeds] * num_images_per_prompt, dim=0) if do_classifier_free_guidance: single_negative_image_embeds = torch.cat([negative_image_embeds[i]] * num_images_per_prompt, dim=0) single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds], dim=0) single_image_embeds = single_image_embeds.to(device=device) ip_adapter_image_embeds.append(single_image_embeds) return ip_adapter_image_embeds # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs def prepare_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://huggingface.co/papers/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs def check_inputs( self, prompt, strength, num_inference_steps, height, width, negative_prompt=None, prompt_embeds=None, pooled_prompt_embeds=None, negative_prompt_embeds=None, negative_pooled_prompt_embeds=None, ip_adapter_image=None, ip_adapter_image_embeds=None, callback_on_step_end_tensor_inputs=None, max_sequence_length=None, ): if strength < 0 or strength > 1: raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}") if not isinstance(num_inference_steps, int) or num_inference_steps <= 0: raise ValueError( f"`num_inference_steps` has to be a positive integer but is {num_inference_steps} of type" f" {type(num_inference_steps)}." 
) if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") if callback_on_step_end_tensor_inputs is not None and not all( k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs ): raise ValueError( f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" ) if prompt is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt is None and prompt_embeds is None: raise ValueError( "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." ) elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" f" {negative_prompt_embeds}. Please make sure to only forward one of the two." ) if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError( "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" f" {negative_prompt_embeds.shape}." ) if prompt_embeds is not None and pooled_prompt_embeds is None: raise ValueError( "If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`." ) if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None: raise ValueError( "If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`." ) if ip_adapter_image is not None and ip_adapter_image_embeds is not None: raise ValueError( "Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined." 
) if ip_adapter_image_embeds is not None: if not isinstance(ip_adapter_image_embeds, list): raise ValueError( f"`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}" ) elif ip_adapter_image_embeds[0].ndim not in [3, 4]: raise ValueError( f"`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D" ) if max_sequence_length is not None and max_sequence_length > 256: raise ValueError(f"`max_sequence_length` cannot be greater than 256 but is {max_sequence_length}") # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl_img2img.StableDiffusionXLImg2ImgPipeline.get_timesteps def get_timesteps(self, num_inference_steps, strength, device, denoising_start=None): # get the original timestep using init_timestep if denoising_start is None: init_timestep = min(int(num_inference_steps * strength), num_inference_steps) t_start = max(num_inference_steps - init_timestep, 0) else: t_start = 0 timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :] # Strength is irrelevant if we directly request a timestep to start at; # that is, strength is determined by the denoising_start instead. if denoising_start is not None: discrete_timestep_cutoff = int( round( self.scheduler.config.num_train_timesteps - (denoising_start * self.scheduler.config.num_train_timesteps) ) ) num_inference_steps = (timesteps < discrete_timestep_cutoff).sum().item() if self.scheduler.order == 2 and num_inference_steps % 2 == 0: # if the scheduler is a 2nd order scheduler we might have to do +1 # because `num_inference_steps` might be even given that every timestep # (except the highest one) is duplicated. If `num_inference_steps` is even it would # mean that we cut the timesteps in the middle of the denoising step # (between 1st and 2nd derivative) which leads to incorrect results. 
By adding 1 # we ensure that the denoising process always ends after the 2nd derivate step of the scheduler num_inference_steps = num_inference_steps + 1 # because t_n+1 >= t_n, we slice the timesteps starting from the end timesteps = timesteps[-num_inference_steps:] return timesteps, num_inference_steps return timesteps, num_inference_steps - t_start # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl_img2img.StableDiffusionXLImg2ImgPipeline.prepare_latents def prepare_latents( self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None, add_noise=True ): if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)): raise ValueError( f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}" ) latents_mean = latents_std = None if hasattr(self.vae.config, "latents_mean") and self.vae.config.latents_mean is not None: latents_mean = torch.tensor(self.vae.config.latents_mean).view(1, 4, 1, 1) if hasattr(self.vae.config, "latents_std") and self.vae.config.latents_std is not None: latents_std = torch.tensor(self.vae.config.latents_std).view(1, 4, 1, 1) # Offload text encoder if `enable_model_cpu_offload` was enabled if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None: self.text_encoder_2.to("cpu") torch.cuda.empty_cache() image = image.to(device=device, dtype=dtype) batch_size = batch_size * num_images_per_prompt if image.shape[1] == 4: init_latents = image else: # make sure the VAE is in float32 mode, as it overflows in float16 if self.vae.config.force_upcast: image = image.float() self.vae.to(dtype=torch.float32) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." ) elif isinstance(generator, list): if image.shape[0] < batch_size and batch_size % image.shape[0] == 0: image = torch.cat([image] * (batch_size // image.shape[0]), dim=0) elif image.shape[0] < batch_size and batch_size % image.shape[0] != 0: raise ValueError( f"Cannot duplicate `image` of batch size {image.shape[0]} to effective batch_size {batch_size} " ) init_latents = [ retrieve_latents(self.vae.encode(image[i : i + 1]), generator=generator[i]) for i in range(batch_size) ] init_latents = torch.cat(init_latents, dim=0) else: init_latents = retrieve_latents(self.vae.encode(image), generator=generator) if self.vae.config.force_upcast: self.vae.to(dtype) init_latents = init_latents.to(dtype) if latents_mean is not None and latents_std is not None: latents_mean = latents_mean.to(device=device, dtype=dtype) latents_std = latents_std.to(device=device, dtype=dtype) init_latents = (init_latents - latents_mean) * self.vae.config.scaling_factor / latents_std else: init_latents = self.vae.config.scaling_factor * init_latents if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0: # expand init_latents for batch_size additional_image_per_prompt = batch_size // init_latents.shape[0] init_latents = torch.cat([init_latents] * additional_image_per_prompt, dim=0) elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0: raise ValueError( f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts." 
) else: init_latents = torch.cat([init_latents], dim=0) if add_noise: shape = init_latents.shape noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) # get latents init_latents = self.scheduler.add_noise(init_latents, noise, timestep) latents = init_latents return latents # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline._get_add_time_ids def _get_add_time_ids( self, original_size, crops_coords_top_left, target_size, dtype, text_encoder_projection_dim=None ): add_time_ids = list(original_size + crops_coords_top_left + target_size) passed_add_embed_dim = ( self.unet.config.addition_time_embed_dim * len(add_time_ids) + text_encoder_projection_dim ) expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features if expected_add_embed_dim != passed_add_embed_dim: raise ValueError( f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`." ) add_time_ids = torch.tensor([add_time_ids], dtype=dtype) return add_time_ids # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.upcast_vae def upcast_vae(self): dtype = self.vae.dtype self.vae.to(dtype=torch.float32) use_torch_2_0_or_xformers = isinstance( self.vae.decoder.mid_block.attentions[0].processor, ( AttnProcessor2_0, XFormersAttnProcessor, FusedAttnProcessor2_0, ), ) # if xformers or torch_2_0 is used attention block does not need # to be in float32 which can save lots of memory if use_torch_2_0_or_xformers: self.vae.post_quant_conv.to(dtype) self.vae.decoder.conv_in.to(dtype) self.vae.decoder.mid_block.to(dtype) # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding def get_guidance_scale_embedding( self, w: torch.Tensor, embedding_dim: int = 512, dtype: torch.dtype = torch.float32 ) -> torch.Tensor: """ See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298 Args: w (`torch.Tensor`): Generate embedding vectors with a specified guidance scale to subsequently enrich timestep embeddings. embedding_dim (`int`, *optional*, defaults to 512): Dimension of the embeddings to generate. dtype (`torch.dtype`, *optional*, defaults to `torch.float32`): Data type of the generated embeddings. Returns: `torch.Tensor`: Embedding vectors with shape `(len(w), embedding_dim)`. """ assert len(w.shape) == 1 w = w * 1000.0 half_dim = embedding_dim // 2 emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1) emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb) emb = w.to(dtype)[:, None] * emb[None, :] emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) if embedding_dim % 2 == 1: # zero pad emb = torch.nn.functional.pad(emb, (0, 1)) assert emb.shape == (w.shape[0], embedding_dim) return emb @property def guidance_scale(self): return self._guidance_scale # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://huggingface.co/papers/2205.11487 . `guidance_scale = 1` # corresponds to doing no classifier free guidance. 
@property def do_classifier_free_guidance(self): return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None @property def cross_attention_kwargs(self): return self._cross_attention_kwargs @property def denoising_start(self): return self._denoising_start @property def denoising_end(self): return self._denoising_end @property def num_timesteps(self): return self._num_timesteps @property def interrupt(self): return self._interrupt @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__( self, prompt: Union[str, List[str]] = None, image: PipelineImageInput = None, strength: float = 0.3, height: Optional[int] = None, width: Optional[int] = None, num_inference_steps: int = 50, timesteps: List[int] = None, sigmas: List[float] = None, denoising_start: Optional[float] = None, denoising_end: Optional[float] = None, guidance_scale: float = 5.0, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.Tensor] = None, prompt_embeds: Optional[torch.Tensor] = None, pooled_prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, negative_pooled_prompt_embeds: Optional[torch.Tensor] = None, ip_adapter_image: Optional[PipelineImageInput] = None, ip_adapter_image_embeds: Optional[List[torch.Tensor]] = None, output_type: Optional[str] = "pil", return_dict: bool = True, cross_attention_kwargs: Optional[Dict[str, Any]] = None, original_size: Optional[Tuple[int, int]] = None, crops_coords_top_left: Tuple[int, int] = (0, 0), target_size: Optional[Tuple[int, int]] = None, negative_original_size: Optional[Tuple[int, int]] = None, negative_crops_coords_top_left: Tuple[int, int] = (0, 0), negative_target_size: Optional[Tuple[int, int]] = None, callback_on_step_end: Optional[ Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks] ] = None, callback_on_step_end_tensor_inputs: List[str] = ["latents"], max_sequence_length: int = 256, map: PipelineImageInput = None, ): r""" Function invoked when calling the pipeline for generation. Args: prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. instead. image (`torch.Tensor` or `PIL.Image.Image` or `np.ndarray` or `List[torch.Tensor]` or `List[PIL.Image.Image]` or `List[np.ndarray]`): The image(s) to modify with the pipeline. strength (`float`, *optional*, defaults to 0.3): Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1. `image` will be used as a starting point, adding more noise to it the larger the `strength`. The number of denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise will be maximum and the denoising process will run for the full number of iterations specified in `num_inference_steps`. A value of 1, therefore, essentially ignores `image`. Note that in the case of `denoising_start` being declared as an integer, the value of `strength` will be ignored. height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): The height in pixels of the generated image. This is set to 1024 by default for the best results. 
Anything below 512 pixels won't work well for [Kwai-Kolors/Kolors-diffusers](https://huggingface.co/Kwai-Kolors/Kolors-diffusers) and checkpoints that are not specifically fine-tuned on low resolutions. width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): The width in pixels of the generated image. This is set to 1024 by default for the best results. Anything below 512 pixels won't work well for [Kwai-Kolors/Kolors-diffusers](https://huggingface.co/Kwai-Kolors/Kolors-diffusers) and checkpoints that are not specifically fine-tuned on low resolutions. num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. timesteps (`List[int]`, *optional*): Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed will be used. Must be in descending order. sigmas (`List[float]`, *optional*): Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed will be used. denoising_start (`float`, *optional*): When specified, indicates the fraction (between 0.0 and 1.0) of the total denoising process to be bypassed before it is initiated. Consequently, the initial part of the denoising process is skipped and it is assumed that the passed `image` is a partly denoised image. Note that when this is specified, strength will be ignored. The `denoising_start` parameter is particularly beneficial when this pipeline is integrated into a "Mixture of Denoisers" multi-pipeline setup, as detailed in [**Refine Image Quality**](https://huggingface.co/docs/diffusers/using-diffusers/sdxl#refine-image-quality). denoising_end (`float`, *optional*): When specified, determines the fraction (between 0.0 and 1.0) of the total denoising process to be completed before it is intentionally prematurely terminated. As a result, the returned sample will still retain a substantial amount of noise as determined by the discrete timesteps selected by the scheduler. The denoising_end parameter should ideally be utilized when this pipeline forms a part of a "Mixture of Denoisers" multi-pipeline setup, as elaborated in [**Refining the Image Output**](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output) guidance_scale (`float`, *optional*, defaults to 5.0): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://huggingface.co/papers/2207.12598). `guidance_scale` is defined as `w` of equation 2. of [Imagen Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. 
eta (`float`, *optional*, defaults to 0.0): Corresponds to parameter eta (η) in the DDIM paper: https://huggingface.co/papers/2010.02502. Only applies to [`schedulers.DDIMScheduler`], will be ignored for others. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor will ge generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. pooled_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, pooled text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. negative_pooled_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` input argument. ip_adapter_image: (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters. ip_adapter_image_embeds (`List[torch.Tensor]`, *optional*): Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of IP-adapters. Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should contain the negative image embedding if `do_classifier_free_guidance` is set to `True`. If not provided, embeddings are computed from the `ip_adapter_image` input argument. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.kolors.KolorsPipelineOutput`] instead of a plain tuple. cross_attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under `self.processor` in [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled. `original_size` defaults to `(height, width)` if not specified. Part of SDXL's micro-conditioning as explained in section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)): `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position `crops_coords_top_left` downwards. 
Favorable, well-centered images are usually achieved by setting `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): For most cases, `target_size` should be set to the desired height and width of the generated image. If not specified it will default to `(height, width)`. Part of SDXL's micro-conditioning as explained in section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). negative_original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): To negatively condition the generation process based on a specific image resolution. Part of SDXL's micro-conditioning as explained in section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. negative_crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)): To negatively condition the generation process based on a specific crop coordinates. Part of SDXL's micro-conditioning as explained in section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. negative_target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): To negatively condition the generation process based on a target image resolution. It should be as same as the `target_size` for most cases. Part of SDXL's micro-conditioning as explained in section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. callback_on_step_end (`Callable`, `PipelineCallback`, `MultiPipelineCallbacks`, *optional*): A function or a subclass of `PipelineCallback` or `MultiPipelineCallbacks` that is called at the end of each denoising step during the inference. with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by `callback_on_step_end_tensor_inputs`. callback_on_step_end_tensor_inputs (`List`, *optional*): The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the `._callback_tensor_inputs` attribute of your pipeline class. max_sequence_length (`int` defaults to 256): Maximum sequence length to use with the `prompt`. Examples: Returns: [`~pipelines.kolors.KolorsPipelineOutput`] or `tuple`: [`~pipelines.kolors.KolorsPipelineOutput`] if `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is a list with the generated images. """ if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)): callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs # 0. Default height and width to unet height = height or self.default_sample_size * self.vae_scale_factor width = width or self.default_sample_size * self.vae_scale_factor original_size = original_size or (height, width) target_size = target_size or (height, width) # 1. Check inputs. 
Raise error if not correct self.check_inputs( prompt, strength, num_inference_steps, height, width, negative_prompt, prompt_embeds, pooled_prompt_embeds, negative_prompt_embeds, negative_pooled_prompt_embeds, ip_adapter_image, ip_adapter_image_embeds, callback_on_step_end_tensor_inputs, max_sequence_length=max_sequence_length, ) self._guidance_scale = guidance_scale self._cross_attention_kwargs = cross_attention_kwargs self._denoising_end = denoising_end self._denoising_start = denoising_start self._interrupt = False # 2. Define call parameters if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device # 3. Encode input prompt ( prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds, ) = self.encode_prompt( prompt=prompt, device=device, num_images_per_prompt=num_images_per_prompt, do_classifier_free_guidance=self.do_classifier_free_guidance, negative_prompt=negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, ) # 4. Preprocess image init_image = self.image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32) map = self.mask_processor.preprocess( map, height=height // self.vae_scale_factor, width=width // self.vae_scale_factor ).to(device) # 5. Prepare timesteps def denoising_value_valid(dnv): return isinstance(dnv, float) and 0 < dnv < 1 timesteps, num_inference_steps = retrieve_timesteps( self.scheduler, num_inference_steps, device, timesteps, sigmas ) # begin diff diff change total_time_steps = num_inference_steps # end diff diff change timesteps, num_inference_steps = self.get_timesteps( num_inference_steps, strength, device, denoising_start=self.denoising_start if denoising_value_valid(self.denoising_start) else None, ) latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) add_noise = True if self.denoising_start is None else False # 6. Prepare latent variables if latents is None: latents = self.prepare_latents( init_image, latent_timestep, batch_size, num_images_per_prompt, prompt_embeds.dtype, device, generator, add_noise, ) # 7. Prepare extra step kwargs. extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) height, width = latents.shape[-2:] height = height * self.vae_scale_factor width = width * self.vae_scale_factor original_size = original_size or (height, width) target_size = target_size or (height, width) # 8. 
Prepare added time ids & embeddings add_text_embeds = pooled_prompt_embeds text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1]) add_time_ids = self._get_add_time_ids( original_size, crops_coords_top_left, target_size, dtype=prompt_embeds.dtype, text_encoder_projection_dim=text_encoder_projection_dim, ) if negative_original_size is not None and negative_target_size is not None: negative_add_time_ids = self._get_add_time_ids( negative_original_size, negative_crops_coords_top_left, negative_target_size, dtype=prompt_embeds.dtype, text_encoder_projection_dim=text_encoder_projection_dim, ) else: negative_add_time_ids = add_time_ids if self.do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0) add_time_ids = torch.cat([negative_add_time_ids, add_time_ids], dim=0) prompt_embeds = prompt_embeds.to(device) add_text_embeds = add_text_embeds.to(device) add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1) if ip_adapter_image is not None or ip_adapter_image_embeds is not None: image_embeds = self.prepare_ip_adapter_image_embeds( ip_adapter_image, ip_adapter_image_embeds, device, batch_size * num_images_per_prompt, self.do_classifier_free_guidance, ) # 9. Denoising loop num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) # preparations for diff diff original_with_noise = self.prepare_latents( init_image, timesteps, batch_size, num_images_per_prompt, prompt_embeds.dtype, device, generator ) thresholds = torch.arange(total_time_steps, dtype=map.dtype) / total_time_steps thresholds = thresholds.unsqueeze(1).unsqueeze(1).to(device) masks = map.squeeze() > thresholds # end diff diff preparations # 9.1 Apply denoising_end if ( self.denoising_end is not None and self.denoising_start is not None and denoising_value_valid(self.denoising_end) and denoising_value_valid(self.denoising_start) and self.denoising_start >= self.denoising_end ): raise ValueError( f"`denoising_start`: {self.denoising_start} cannot be larger than or equal to `denoising_end`: " + f" {self.denoising_end} when using type float." 
) elif self.denoising_end is not None and denoising_value_valid(self.denoising_end): discrete_timestep_cutoff = int( round( self.scheduler.config.num_train_timesteps - (self.denoising_end * self.scheduler.config.num_train_timesteps) ) ) num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps))) timesteps = timesteps[:num_inference_steps] # 9.2 Optionally get Guidance Scale Embedding timestep_cond = None if self.unet.config.time_cond_proj_dim is not None: guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt) timestep_cond = self.get_guidance_scale_embedding( guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim ).to(device=device, dtype=latents.dtype) self._num_timesteps = len(timesteps) with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): if self.interrupt: continue # diff diff if i == 0: latents = original_with_noise[:1] else: mask = masks[i].unsqueeze(0).to(latents.dtype) mask = mask.unsqueeze(1) # fit shape latents = original_with_noise[i] * mask + latents * (1 - mask) # end diff diff # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) # predict the noise residual added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids} if ip_adapter_image is not None or ip_adapter_image_embeds is not None: added_cond_kwargs["image_embeds"] = image_embeds noise_pred = self.unet( latent_model_input, t, encoder_hidden_states=prompt_embeds, timestep_cond=timestep_cond, cross_attention_kwargs=self.cross_attention_kwargs, added_cond_kwargs=added_cond_kwargs, return_dict=False, )[0] # perform guidance if self.do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 latents_dtype = latents.dtype latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] if latents.dtype != latents_dtype: if torch.backends.mps.is_available(): # some platforms (eg. 
apple mps) misbehave due to a pytorch bug: https://github.com/pytorch/pytorch/pull/99272 latents = latents.to(latents_dtype) if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop("latents", latents) prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) add_text_embeds = callback_outputs.pop("add_text_embeds", add_text_embeds) negative_pooled_prompt_embeds = callback_outputs.pop( "negative_pooled_prompt_embeds", negative_pooled_prompt_embeds ) add_time_ids = callback_outputs.pop("add_time_ids", add_time_ids) negative_add_time_ids = callback_outputs.pop("negative_add_time_ids", negative_add_time_ids) if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if XLA_AVAILABLE: xm.mark_step() if not output_type == "latent": # make sure the VAE is in float32 mode, as it overflows in float16 needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast if needs_upcasting: self.upcast_vae() latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) elif latents.dtype != self.vae.dtype: if torch.backends.mps.is_available(): # some platforms (eg. apple mps) misbehave due to a pytorch bug: https://github.com/pytorch/pytorch/pull/99272 self.vae = self.vae.to(latents.dtype) # unscale/denormalize the latents latents = latents / self.vae.config.scaling_factor image = self.vae.decode(latents, return_dict=False)[0] # cast back to fp16 if needed if needs_upcasting: self.vae.to(dtype=torch.float16) else: image = latents if not output_type == "latent": image = self.image_processor.postprocess(image, output_type=output_type) # Offload all models self.maybe_free_model_hooks() if not return_dict: return (image,) return KolorsPipelineOutput(images=image)
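# A minimal usage sketch, kept as a comment so it is not executed on import. It is illustrative
# only: the checkpoint name is the public Kolors diffusers repo, and "input.png" / "change_map.png"
# are hypothetical local files of matching size. The grayscale `map` assigns a per-region amount of
# change, realised through the per-step masks computed in `__call__` above.
#
#   import torch
#   from diffusers import DiffusionPipeline
#   from diffusers.utils import load_image
#
#   pipe = DiffusionPipeline.from_pretrained(
#       "Kwai-Kolors/Kolors-diffusers",
#       custom_pipeline="pipeline_kolors_differential_img2img",
#       torch_dtype=torch.float16,
#   ).to("cuda")
#
#   init_image = load_image("input.png")        # hypothetical input image
#   change_map = load_image("change_map.png")   # hypothetical grayscale change map
#   result = pipe(
#       prompt="a painting of a mountain lake at sunset",
#       image=init_image,
#       map=change_map,
#       strength=1.0,  # the map then governs how much each region may change
#       num_inference_steps=50,
#   ).images[0]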
diffusers/examples/community/pipeline_kolors_differential_img2img.py/0
{ "file_path": "diffusers/examples/community/pipeline_kolors_differential_img2img.py", "repo_id": "diffusers", "token_count": 29911 }
142
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect from typing import Any, Callable, Dict, List, Optional, Tuple, Union import intel_extension_for_pytorch as ipex import torch from transformers import ( CLIPImageProcessor, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionModelWithProjection, ) from diffusers import StableDiffusionXLPipeline from diffusers.image_processor import PipelineImageInput, VaeImageProcessor from diffusers.loaders import ( StableDiffusionXLLoraLoaderMixin, TextualInversionLoaderMixin, ) from diffusers.models import AutoencoderKL, UNet2DConditionModel from diffusers.models.attention_processor import ( AttnProcessor2_0, XFormersAttnProcessor, ) from diffusers.models.lora import adjust_lora_scale_text_encoder from diffusers.pipelines.stable_diffusion_xl import StableDiffusionXLPipelineOutput from diffusers.schedulers import KarrasDiffusionSchedulers from diffusers.utils import ( USE_PEFT_BACKEND, deprecate, is_invisible_watermark_available, is_torch_xla_available, logging, replace_example_docstring, scale_lora_layers, unscale_lora_layers, ) from diffusers.utils.torch_utils import randn_tensor if is_invisible_watermark_available(): from diffusers.pipelines.stable_diffusion_xl.watermark import StableDiffusionXLWatermarker if is_torch_xla_available(): import torch_xla.core.xla_model as xm XLA_AVAILABLE = True else: XLA_AVAILABLE = False logger = logging.get_logger(__name__) # pylint: disable=invalid-name EXAMPLE_DOC_STRING = """ Examples: ```py >>> import torch >>> from diffusers import StableDiffusionXLPipelineIpex >>> # SDXL-Turbo, a distilled version of SDXL 1.0, trained for real-time synthesis >>> pipe = StableDiffusionXLPipelineIpex.from_pretrained( ... "stabilityai/sdxl-turbo", low_cpu_mem_usage=True, use_safetensors=True ... ) >>> num_inference_steps = 1 >>> guidance_scale = 0.0 >>> use_bf16 = True >>> data_type = torch.bfloat16 if use_bf16 else torch.float32 >>> prompt = "a photo of an astronaut riding a horse on mars" >>> # value of image height/width should be consistent with the pipeline inference >>> # For Float32 >>> pipe.prepare_for_ipex(torch.float32, prompt, height=512, width=512) >>> # For BFloat16 >>> pipe.prepare_for_ipex(torch.bfloat16, prompt, height=512, width=512) >>> # value of image height/width should be consistent with 'prepare_for_ipex()' >>> # For Float32 >>> image = pipe(prompt, num_inference_steps=num_inference_steps, height=512, width=512, guidance_scale=guidance_scale).images[0] >>> # For BFloat16 >>> with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16): >>> image = pipe(prompt, num_inference_steps=num_inference_steps, height=512, width=512, guidance_scale=guidance_scale).images[0] ``` """ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.rescale_noise_cfg def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0): """ Rescale `noise_cfg` according to `guidance_rescale`. 
Based on findings of [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://huggingface.co/papers/2305.08891). See Section 3.4 """ std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True) std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True) # rescale the results from guidance (fixes overexposure) noise_pred_rescaled = noise_cfg * (std_text / std_cfg) # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg return noise_cfg # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps def retrieve_timesteps( scheduler, num_inference_steps: Optional[int] = None, device: Optional[Union[str, torch.device]] = None, timesteps: Optional[List[int]] = None, **kwargs, ): """ Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. Args: scheduler (`SchedulerMixin`): The scheduler to get timesteps from. num_inference_steps (`int`): The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` must be `None`. device (`str` or `torch.device`, *optional*): The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. timesteps (`List[int]`, *optional*): Custom timesteps used to support arbitrary spacing between timesteps. If `None`, then the default timestep spacing strategy of the scheduler is used. If `timesteps` is passed, `num_inference_steps` must be `None`. Returns: `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the second element is the number of inference steps. """ if timesteps is not None: accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accepts_timesteps: raise ValueError( f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" f" timestep schedules. Please check whether you are using the correct scheduler." ) scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) else: scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) timesteps = scheduler.timesteps return timesteps, num_inference_steps class StableDiffusionXLPipelineIpex( StableDiffusionXLPipeline, ): r""" Pipeline for text-to-image generation using Stable Diffusion XL on IPEX. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) In addition the pipeline inherits the following loading methods: - *LoRA*: [`loaders.StableDiffusionXLLoraLoaderMixin.load_lora_weights`] - *Ckpt*: [`loaders.FromSingleFileMixin.from_single_file`] as well as the following saving methods: - *LoRA*: [`loaders.StableDiffusionXLLoraLoaderMixin.save_lora_weights`] Args: vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. text_encoder ([`CLIPTextModel`]): Frozen text-encoder. 
Stable Diffusion XL uses the text portion of [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. text_encoder_2 ([` CLIPTextModelWithProjection`]): Second frozen text-encoder. Stable Diffusion XL uses the text and pool portion of [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection), specifically the [laion/CLIP-ViT-bigG-14-laion2B-39B-b160k](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k) variant. tokenizer (`CLIPTokenizer`): Tokenizer of class [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). tokenizer_2 (`CLIPTokenizer`): Second Tokenizer of class [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. force_zeros_for_empty_prompt (`bool`, *optional*, defaults to `"True"`): Whether the negative prompt embeddings shall be forced to always be set to 0. Also see the config of `stabilityai/stable-diffusion-xl-base-1-0`. add_watermarker (`bool`, *optional*): Whether to use the [invisible_watermark library](https://github.com/ShieldMnt/invisible-watermark/) to watermark output images. If not defined, it will default to True if the package is installed, otherwise no watermarker will be used. """ model_cpu_offload_seq = "text_encoder->text_encoder_2->unet->vae" _optional_components = [ "tokenizer", "tokenizer_2", "text_encoder", "text_encoder_2", "image_encoder", "feature_extractor", ] _callback_tensor_inputs = [ "latents", "prompt_embeds", "negative_prompt_embeds", "add_text_embeds", "add_time_ids", "negative_pooled_prompt_embeds", "negative_add_time_ids", ] def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, text_encoder_2: CLIPTextModelWithProjection, tokenizer: CLIPTokenizer, tokenizer_2: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: KarrasDiffusionSchedulers, image_encoder: CLIPVisionModelWithProjection = None, feature_extractor: CLIPImageProcessor = None, force_zeros_for_empty_prompt: bool = True, add_watermarker: Optional[bool] = None, ): # super().__init__() self.register_modules( vae=vae, text_encoder=text_encoder, text_encoder_2=text_encoder_2, tokenizer=tokenizer, tokenizer_2=tokenizer_2, unet=unet, scheduler=scheduler, image_encoder=image_encoder, feature_extractor=feature_extractor, ) self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8 self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) self.default_sample_size = ( self.unet.config.sample_size if hasattr(self, "unet") and self.unet is not None and hasattr(self.unet.config, "sample_size") else 128 ) add_watermarker = add_watermarker if add_watermarker is not None else is_invisible_watermark_available() if add_watermarker: self.watermark = StableDiffusionXLWatermarker() else: self.watermark = None def encode_prompt( self, prompt: str, prompt_2: Optional[str] = None, device: Optional[torch.device] = None, 
num_images_per_prompt: int = 1, do_classifier_free_guidance: bool = True, negative_prompt: Optional[str] = None, negative_prompt_2: Optional[str] = None, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, pooled_prompt_embeds: Optional[torch.Tensor] = None, negative_pooled_prompt_embeds: Optional[torch.Tensor] = None, lora_scale: Optional[float] = None, clip_skip: Optional[int] = None, ): r""" Encodes the prompt into text encoder hidden states. Args: prompt (`str` or `List[str]`, *optional*): prompt to be encoded prompt_2 (`str` or `List[str]`, *optional*): The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is used in both text-encoders device: (`torch.device`): torch device num_images_per_prompt (`int`): number of images that should be generated per prompt do_classifier_free_guidance (`bool`): whether to use classifier free guidance or not negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). negative_prompt_2 (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. pooled_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, pooled text embeddings will be generated from `prompt` input argument. negative_pooled_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` input argument. lora_scale (`float`, *optional*): A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. clip_skip (`int`, *optional*): Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that the output of the pre-final layer will be used for computing the prompt embeddings. 
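        Returns:
            A tuple `(prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds,
            negative_pooled_prompt_embeds)`, matching the embeddings described above.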
""" device = device or self._execution_device # set lora scale so that monkey patched LoRA # function of text encoder can correctly access it if lora_scale is not None and isinstance(self, StableDiffusionXLLoraLoaderMixin): self._lora_scale = lora_scale # dynamically adjust the LoRA scale if self.text_encoder is not None: if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) else: scale_lora_layers(self.text_encoder, lora_scale) if self.text_encoder_2 is not None: if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale) else: scale_lora_layers(self.text_encoder_2, lora_scale) prompt = [prompt] if isinstance(prompt, str) else prompt if prompt is not None: batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] # Define tokenizers and text encoders tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2] text_encoders = ( [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2] ) if prompt_embeds is None: prompt_2 = prompt_2 or prompt prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2 # textual inversion: procecss multi-vector tokens if necessary prompt_embeds_list = [] prompts = [prompt, prompt_2] for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders): if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, tokenizer) text_inputs = tokenizer( prompt, padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( text_input_ids, untruncated_ids ): removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1]) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {tokenizer.model_max_length} tokens: {removed_text}" ) prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True) # We are only ALWAYS interested in the pooled output of the final text encoder if pooled_prompt_embeds is None and prompt_embeds[0].ndim == 2: pooled_prompt_embeds = prompt_embeds[0] if clip_skip is None: prompt_embeds = prompt_embeds.hidden_states[-2] else: # "2" because SDXL always indexes from the penultimate layer. 
prompt_embeds = prompt_embeds.hidden_states[-(clip_skip + 2)] prompt_embeds_list.append(prompt_embeds) prompt_embeds = torch.concat(prompt_embeds_list, dim=-1) # get unconditional embeddings for classifier free guidance zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt: negative_prompt_embeds = torch.zeros_like(prompt_embeds) negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds) elif do_classifier_free_guidance and negative_prompt_embeds is None: negative_prompt = negative_prompt or "" negative_prompt_2 = negative_prompt_2 or negative_prompt # normalize str to list negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt negative_prompt_2 = ( batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2 ) uncond_tokens: List[str] if prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." ) elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) else: uncond_tokens = [negative_prompt, negative_prompt_2] negative_prompt_embeds_list = [] for negative_prompt, tokenizer, text_encoder in zip(uncond_tokens, tokenizers, text_encoders): if isinstance(self, TextualInversionLoaderMixin): negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer) max_length = prompt_embeds.shape[1] uncond_input = tokenizer( negative_prompt, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", ) negative_prompt_embeds = text_encoder( uncond_input.input_ids.to(device), output_hidden_states=True, ) # We are only ALWAYS interested in the pooled output of the final text encoder if negative_pooled_prompt_embeds is None and negative_prompt_embeds[0].ndim == 2: negative_pooled_prompt_embeds = negative_prompt_embeds[0] negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2] negative_prompt_embeds_list.append(negative_prompt_embeds) negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1) if self.text_encoder_2 is not None: prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) else: prompt_embeds = prompt_embeds.to(dtype=self.unet.dtype, device=device) bs_embed, seq_len, _ = prompt_embeds.shape # duplicate text embeddings for each generation per prompt, using mps friendly method prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) if do_classifier_free_guidance: # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = negative_prompt_embeds.shape[1] if self.text_encoder_2 is not None: negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) else: negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.unet.dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) pooled_prompt_embeds = 
pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( bs_embed * num_images_per_prompt, -1 ) if do_classifier_free_guidance: negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( bs_embed * num_images_per_prompt, -1 ) if self.text_encoder is not None: if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: # Retrieve the original scale by scaling back the LoRA layers unscale_lora_layers(self.text_encoder, lora_scale) if self.text_encoder_2 is not None: if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: # Retrieve the original scale by scaling back the LoRA layers unscale_lora_layers(self.text_encoder_2, lora_scale) return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image def encode_image(self, image, device, num_images_per_prompt): dtype = next(self.image_encoder.parameters()).dtype if not isinstance(image, torch.Tensor): image = self.feature_extractor(image, return_tensors="pt").pixel_values image = image.to(device=device, dtype=dtype) image_embeds = self.image_encoder(image).image_embeds image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_embeds = torch.zeros_like(image_embeds) return image_embeds, uncond_image_embeds # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs def prepare_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://huggingface.co/papers/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs def check_inputs( self, prompt, prompt_2, height, width, callback_steps, negative_prompt=None, negative_prompt_2=None, prompt_embeds=None, negative_prompt_embeds=None, pooled_prompt_embeds=None, negative_pooled_prompt_embeds=None, callback_on_step_end_tensor_inputs=None, ): if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." ) if callback_on_step_end_tensor_inputs is not None and not all( k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs ): raise ValueError( f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" ) if prompt is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." 
) elif prompt_2 is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt is None and prompt_embeds is None: raise ValueError( "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." ) elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)): raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}") if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" f" {negative_prompt_embeds}. Please make sure to only forward one of the two." ) elif negative_prompt_2 is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:" f" {negative_prompt_embeds}. Please make sure to only forward one of the two." ) if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError( "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" f" {negative_prompt_embeds.shape}." ) if prompt_embeds is not None and pooled_prompt_embeds is None: raise ValueError( "If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`." ) if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None: raise ValueError( "If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`." ) # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): shape = ( batch_size, num_channels_latents, int(height) // self.vae_scale_factor, int(width) // self.vae_scale_factor, ) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
) if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=torch.float32) else: latents = latents.to(device) # scale the initial noise by the standard deviation required by the scheduler latents = latents * self.scheduler.init_noise_sigma return latents def _get_add_time_ids( self, original_size, crops_coords_top_left, target_size, dtype, text_encoder_projection_dim=None ): add_time_ids = list(original_size + crops_coords_top_left + target_size) passed_add_embed_dim = ( self.unet.config.addition_time_embed_dim * len(add_time_ids) + text_encoder_projection_dim ) expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features if expected_add_embed_dim != passed_add_embed_dim: raise ValueError( f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`." ) add_time_ids = torch.tensor([add_time_ids], dtype=dtype) return add_time_ids # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae def upcast_vae(self): dtype = self.vae.dtype self.vae.to(dtype=torch.float32) use_torch_2_0_or_xformers = isinstance( self.vae.decoder.mid_block.attentions[0].processor, ( AttnProcessor2_0, XFormersAttnProcessor, ), ) # if xformers or torch_2_0 is used attention block does not need # to be in float32 which can save lots of memory if use_torch_2_0_or_xformers: self.vae.post_quant_conv.to(dtype) self.vae.decoder.conv_in.to(dtype) self.vae.decoder.mid_block.to(dtype) # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding def get_guidance_scale_embedding(self, w, embedding_dim=512, dtype=torch.float32): """ See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298 Args: timesteps (`torch.Tensor`): generate embedding vectors at these timesteps embedding_dim (`int`, *optional*, defaults to 512): dimension of the embeddings to generate dtype: data type of the generated embeddings Returns: `torch.Tensor`: Embedding vectors with shape `(len(timesteps), embedding_dim)` """ assert len(w.shape) == 1 w = w * 1000.0 half_dim = embedding_dim // 2 emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1) emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb) emb = w.to(dtype)[:, None] * emb[None, :] emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) if embedding_dim % 2 == 1: # zero pad emb = torch.nn.functional.pad(emb, (0, 1)) assert emb.shape == (w.shape[0], embedding_dim) return emb @property def guidance_scale(self): return self._guidance_scale @property def guidance_rescale(self): return self._guidance_rescale @property def clip_skip(self): return self._clip_skip # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://huggingface.co/papers/2205.11487 . `guidance_scale = 1` # corresponds to doing no classifier free guidance. 
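    # When `guidance_rescale > 0`, the guided prediction is additionally passed through
    # `rescale_noise_cfg` (defined at the top of this file), which is meant to counteract the
    # overexposure seen with zero-terminal-SNR schedules (https://huggingface.co/papers/2305.08891).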
@property def do_classifier_free_guidance(self): return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None @property def cross_attention_kwargs(self): return self._cross_attention_kwargs @property def denoising_end(self): return self._denoising_end @property def num_timesteps(self): return self._num_timesteps @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__( self, prompt: Union[str, List[str]] = None, prompt_2: Optional[Union[str, List[str]]] = None, height: Optional[int] = None, width: Optional[int] = None, num_inference_steps: int = 50, timesteps: List[int] = None, denoising_end: Optional[float] = None, guidance_scale: float = 5.0, negative_prompt: Optional[Union[str, List[str]]] = None, negative_prompt_2: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.Tensor] = None, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, pooled_prompt_embeds: Optional[torch.Tensor] = None, negative_pooled_prompt_embeds: Optional[torch.Tensor] = None, ip_adapter_image: Optional[PipelineImageInput] = None, output_type: Optional[str] = "pil", return_dict: bool = True, cross_attention_kwargs: Optional[Dict[str, Any]] = None, guidance_rescale: float = 0.0, original_size: Optional[Tuple[int, int]] = None, crops_coords_top_left: Tuple[int, int] = (0, 0), target_size: Optional[Tuple[int, int]] = None, negative_original_size: Optional[Tuple[int, int]] = None, negative_crops_coords_top_left: Tuple[int, int] = (0, 0), negative_target_size: Optional[Tuple[int, int]] = None, clip_skip: Optional[int] = None, callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, callback_on_step_end_tensor_inputs: List[str] = ["latents"], **kwargs, ): r""" Function invoked when calling the pipeline for generation. Args: prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. instead. prompt_2 (`str` or `List[str]`, *optional*): The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is used in both text-encoders height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): The height in pixels of the generated image. This is set to 1024 by default for the best results. Anything below 512 pixels won't work well for [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0) and checkpoints that are not specifically fine-tuned on low resolutions. width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): The width in pixels of the generated image. This is set to 1024 by default for the best results. Anything below 512 pixels won't work well for [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0) and checkpoints that are not specifically fine-tuned on low resolutions. num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. timesteps (`List[int]`, *optional*): Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument in their `set_timesteps` method. 
If not defined, the default behavior when `num_inference_steps` is passed will be used. Must be in descending order. denoising_end (`float`, *optional*): When specified, determines the fraction (between 0.0 and 1.0) of the total denoising process to be completed before it is intentionally prematurely terminated. As a result, the returned sample will still retain a substantial amount of noise as determined by the discrete timesteps selected by the scheduler. The denoising_end parameter should ideally be utilized when this pipeline forms a part of a "Mixture of Denoisers" multi-pipeline setup, as elaborated in [**Refining the Image Output**](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output) guidance_scale (`float`, *optional*, defaults to 5.0): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://huggingface.co/papers/2207.12598). `guidance_scale` is defined as `w` of equation 2. of [Imagen Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). negative_prompt_2 (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. eta (`float`, *optional*, defaults to 0.0): Corresponds to parameter eta (η) in the DDIM paper: https://huggingface.co/papers/2010.02502. Only applies to [`schedulers.DDIMScheduler`], will be ignored for others. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor will ge generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. pooled_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, pooled text embeddings will be generated from `prompt` input argument. negative_pooled_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. 
If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` input argument. ip_adapter_image: (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] instead of a plain tuple. cross_attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under `self.processor` in [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). guidance_rescale (`float`, *optional*, defaults to 0.0): Guidance rescale factor proposed by [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://huggingface.co/papers/2305.08891) `guidance_scale` is defined as `φ` in equation 16. of [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://huggingface.co/papers/2305.08891). Guidance rescale factor should fix overexposure when using zero terminal SNR. original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled. `original_size` defaults to `(height, width)` if not specified. Part of SDXL's micro-conditioning as explained in section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)): `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): For most cases, `target_size` should be set to the desired height and width of the generated image. If not specified it will default to `(height, width)`. Part of SDXL's micro-conditioning as explained in section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). negative_original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): To negatively condition the generation process based on a specific image resolution. Part of SDXL's micro-conditioning as explained in section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. negative_crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)): To negatively condition the generation process based on a specific crop coordinates. Part of SDXL's micro-conditioning as explained in section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. negative_target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): To negatively condition the generation process based on a target image resolution. 
It should be as same as the `target_size` for most cases. Part of SDXL's micro-conditioning as explained in section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. callback_on_step_end (`Callable`, *optional*): A function that calls at the end of each denoising steps during the inference. The function is called with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by `callback_on_step_end_tensor_inputs`. callback_on_step_end_tensor_inputs (`List`, *optional*): The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the `._callback_tensor_inputs` attribute of your pipeline class. Examples: Returns: [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] or `tuple`: [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] if `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is a list with the generated images. """ callback = kwargs.pop("callback", None) callback_steps = kwargs.pop("callback_steps", None) if callback is not None: deprecate( "callback", "1.0.0", "Passing `callback` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`", ) if callback_steps is not None: deprecate( "callback_steps", "1.0.0", "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`", ) # 0. Default height and width to unet height = height or self.default_sample_size * self.vae_scale_factor width = width or self.default_sample_size * self.vae_scale_factor original_size = original_size or (height, width) target_size = target_size or (height, width) # 1. Check inputs. Raise error if not correct self.check_inputs( prompt, prompt_2, height, width, callback_steps, negative_prompt, negative_prompt_2, prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds, callback_on_step_end_tensor_inputs, ) self._guidance_scale = guidance_scale self._guidance_rescale = guidance_rescale self._clip_skip = clip_skip self._cross_attention_kwargs = cross_attention_kwargs self._denoising_end = denoising_end # 2. Define call parameters if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device # 3. Encode input prompt lora_scale = ( self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None ) ( prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds, ) = self.encode_prompt( prompt=prompt, prompt_2=prompt_2, device=device, num_images_per_prompt=num_images_per_prompt, do_classifier_free_guidance=self.do_classifier_free_guidance, negative_prompt=negative_prompt, negative_prompt_2=negative_prompt_2, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, lora_scale=lora_scale, clip_skip=self.clip_skip, ) # 4. 
Prepare timesteps timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps) # 5. Prepare latent variables num_channels_latents = self.unet.config.in_channels latents = self.prepare_latents( batch_size * num_images_per_prompt, num_channels_latents, height, width, prompt_embeds.dtype, device, generator, latents, ) # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) # 7. Prepare added time ids & embeddings add_text_embeds = pooled_prompt_embeds if self.text_encoder_2 is None: text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1]) else: text_encoder_projection_dim = self.text_encoder_2.config.projection_dim add_time_ids = self._get_add_time_ids( original_size, crops_coords_top_left, target_size, dtype=prompt_embeds.dtype, text_encoder_projection_dim=text_encoder_projection_dim, ) if negative_original_size is not None and negative_target_size is not None: negative_add_time_ids = self._get_add_time_ids( negative_original_size, negative_crops_coords_top_left, negative_target_size, dtype=prompt_embeds.dtype, text_encoder_projection_dim=text_encoder_projection_dim, ) else: negative_add_time_ids = add_time_ids if self.do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0) add_time_ids = torch.cat([negative_add_time_ids, add_time_ids], dim=0) prompt_embeds = prompt_embeds.to(device) add_text_embeds = add_text_embeds.to(device) add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1) if ip_adapter_image is not None: image_embeds, negative_image_embeds = self.encode_image(ip_adapter_image, device, num_images_per_prompt) if self.do_classifier_free_guidance: image_embeds = torch.cat([negative_image_embeds, image_embeds]) image_embeds = image_embeds.to(device) # 8. Denoising loop num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) # 8.1 Apply denoising_end if ( self.denoising_end is not None and isinstance(self.denoising_end, float) and self.denoising_end > 0 and self.denoising_end < 1 ): discrete_timestep_cutoff = int( round( self.scheduler.config.num_train_timesteps - (self.denoising_end * self.scheduler.config.num_train_timesteps) ) ) num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps))) timesteps = timesteps[:num_inference_steps] # 9. 
Optionally get Guidance Scale Embedding timestep_cond = None if self.unet.config.time_cond_proj_dim is not None: guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt) timestep_cond = self.get_guidance_scale_embedding( guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim ).to(device=device, dtype=latents.dtype) self._num_timesteps = len(timesteps) with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) # predict the noise residual added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids} if ip_adapter_image is not None: added_cond_kwargs["image_embeds"] = image_embeds # noise_pred = self.unet( # latent_model_input, # t, # encoder_hidden_states=prompt_embeds, # timestep_cond=timestep_cond, # cross_attention_kwargs=self.cross_attention_kwargs, # added_cond_kwargs=added_cond_kwargs, # return_dict=False, # )[0] noise_pred = self.unet( latent_model_input, t, encoder_hidden_states=prompt_embeds, added_cond_kwargs=added_cond_kwargs, )["sample"] # perform guidance if self.do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond) if self.do_classifier_free_guidance and self.guidance_rescale > 0.0: # Based on 3.4. in https://huggingface.co/papers/2305.08891 noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=self.guidance_rescale) # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop("latents", latents) prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) add_text_embeds = callback_outputs.pop("add_text_embeds", add_text_embeds) negative_pooled_prompt_embeds = callback_outputs.pop( "negative_pooled_prompt_embeds", negative_pooled_prompt_embeds ) add_time_ids = callback_outputs.pop("add_time_ids", add_time_ids) negative_add_time_ids = callback_outputs.pop("negative_add_time_ids", negative_add_time_ids) # call the callback, if provided if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, "order", 1) callback(step_idx, t, latents) if XLA_AVAILABLE: xm.mark_step() if not output_type == "latent": # make sure the VAE is in float32 mode, as it overflows in float16 needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast if needs_upcasting: self.upcast_vae() latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] # cast back to fp16 if needed if needs_upcasting: self.vae.to(dtype=torch.float16) else: image = latents if not output_type == "latent": # apply watermark if available 
if self.watermark is not None: image = self.watermark.apply_watermark(image) image = self.image_processor.postprocess(image, output_type=output_type) # Offload all models self.maybe_free_model_hooks() if not return_dict: return (image,) return StableDiffusionXLPipelineOutput(images=image) @torch.no_grad() def prepare_for_ipex( self, dtype=torch.float32, prompt: Union[str, List[str]] = None, prompt_2: Optional[Union[str, List[str]]] = None, height: Optional[int] = None, width: Optional[int] = None, num_inference_steps: int = 50, timesteps: List[int] = None, denoising_end: Optional[float] = None, guidance_scale: float = 5.0, negative_prompt: Optional[Union[str, List[str]]] = None, negative_prompt_2: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.Tensor] = None, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, pooled_prompt_embeds: Optional[torch.Tensor] = None, negative_pooled_prompt_embeds: Optional[torch.Tensor] = None, ip_adapter_image: Optional[PipelineImageInput] = None, output_type: Optional[str] = "pil", return_dict: bool = True, cross_attention_kwargs: Optional[Dict[str, Any]] = None, guidance_rescale: float = 0.0, original_size: Optional[Tuple[int, int]] = None, crops_coords_top_left: Tuple[int, int] = (0, 0), target_size: Optional[Tuple[int, int]] = None, negative_original_size: Optional[Tuple[int, int]] = None, negative_crops_coords_top_left: Tuple[int, int] = (0, 0), negative_target_size: Optional[Tuple[int, int]] = None, clip_skip: Optional[int] = None, callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, callback_on_step_end_tensor_inputs: List[str] = ["latents"], **kwargs, ): callback = kwargs.pop("callback", None) callback_steps = kwargs.pop("callback_steps", None) if callback is not None: deprecate( "callback", "1.0.0", "Passing `callback` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`", ) if callback_steps is not None: deprecate( "callback_steps", "1.0.0", "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`", ) # 0. Default height and width to unet height = height or self.default_sample_size * self.vae_scale_factor width = width or self.default_sample_size * self.vae_scale_factor original_size = original_size or (height, width) target_size = target_size or (height, width) # 1. Check inputs. Raise error if not correct self.check_inputs( prompt, prompt_2, height, width, callback_steps, negative_prompt, negative_prompt_2, prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds, callback_on_step_end_tensor_inputs, ) self._guidance_scale = guidance_scale self._guidance_rescale = guidance_rescale self._clip_skip = clip_skip self._cross_attention_kwargs = cross_attention_kwargs self._denoising_end = denoising_end # 2. Define call parameters if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = "cpu" do_classifier_free_guidance = self.do_classifier_free_guidance # 3. 
Encode input prompt lora_scale = ( self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None ) ( prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds, ) = self.encode_prompt( prompt=prompt, prompt_2=prompt_2, device=device, num_images_per_prompt=num_images_per_prompt, do_classifier_free_guidance=self.do_classifier_free_guidance, negative_prompt=negative_prompt, negative_prompt_2=negative_prompt_2, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, lora_scale=lora_scale, clip_skip=self.clip_skip, ) # 5. Prepare latent variables num_channels_latents = self.unet.config.in_channels latents = self.prepare_latents( batch_size * num_images_per_prompt, num_channels_latents, height, width, prompt_embeds.dtype, device, generator, latents, ) # 7. Prepare added time ids & embeddings add_text_embeds = pooled_prompt_embeds if self.text_encoder_2 is None: text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1]) else: text_encoder_projection_dim = self.text_encoder_2.config.projection_dim add_time_ids = self._get_add_time_ids( original_size, crops_coords_top_left, target_size, dtype=prompt_embeds.dtype, text_encoder_projection_dim=text_encoder_projection_dim, ) if negative_original_size is not None and negative_target_size is not None: negative_add_time_ids = self._get_add_time_ids( negative_original_size, negative_crops_coords_top_left, negative_target_size, dtype=prompt_embeds.dtype, text_encoder_projection_dim=text_encoder_projection_dim, ) else: negative_add_time_ids = add_time_ids if self.do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0) add_time_ids = torch.cat([negative_add_time_ids, add_time_ids], dim=0) prompt_embeds = prompt_embeds.to(device) add_text_embeds = add_text_embeds.to(device) add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1) if ip_adapter_image is not None: image_embeds, negative_image_embeds = self.encode_image(ip_adapter_image, device, num_images_per_prompt) if self.do_classifier_free_guidance: image_embeds = torch.cat([negative_image_embeds, image_embeds]) image_embeds = image_embeds.to(device) dummy = torch.ones(1, dtype=torch.int32) latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, dummy) # predict the noise residual added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids} if ip_adapter_image is not None: added_cond_kwargs["image_embeds"] = image_embeds if not output_type == "latent": # make sure the VAE is in float32 mode, as it overflows in float16 needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast if needs_upcasting: self.upcast_vae() latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) # cast back to fp16 if needed if needs_upcasting: self.vae.to(dtype=torch.float16) self.unet = self.unet.to(memory_format=torch.channels_last) self.vae.decoder = self.vae.decoder.to(memory_format=torch.channels_last) self.text_encoder = self.text_encoder.to(memory_format=torch.channels_last) unet_input_example = { "sample": latent_model_input, "timestep": dummy, "encoder_hidden_states": prompt_embeds, "added_cond_kwargs": 
added_cond_kwargs, } vae_decoder_input_example = latents # optimize with ipex if dtype == torch.bfloat16: self.unet = ipex.optimize( self.unet.eval(), dtype=torch.bfloat16, inplace=True, ) self.vae.decoder = ipex.optimize(self.vae.decoder.eval(), dtype=torch.bfloat16, inplace=True) self.text_encoder = ipex.optimize(self.text_encoder.eval(), dtype=torch.bfloat16, inplace=True) elif dtype == torch.float32: self.unet = ipex.optimize( self.unet.eval(), dtype=torch.float32, inplace=True, level="O1", weights_prepack=True, auto_kernel_selection=False, ) self.vae.decoder = ipex.optimize( self.vae.decoder.eval(), dtype=torch.float32, inplace=True, level="O1", weights_prepack=True, auto_kernel_selection=False, ) self.text_encoder = ipex.optimize( self.text_encoder.eval(), dtype=torch.float32, inplace=True, level="O1", weights_prepack=True, auto_kernel_selection=False, ) else: raise ValueError(" The value of 'dtype' should be 'torch.bfloat16' or 'torch.float32' !") # trace unet model to get better performance on IPEX with torch.cpu.amp.autocast(enabled=dtype == torch.bfloat16), torch.no_grad(): unet_trace_model = torch.jit.trace( self.unet, example_kwarg_inputs=unet_input_example, check_trace=False, strict=False ) unet_trace_model = torch.jit.freeze(unet_trace_model) self.unet.forward = unet_trace_model.forward # trace vae.decoder model to get better performance on IPEX with torch.cpu.amp.autocast(enabled=dtype == torch.bfloat16), torch.no_grad(): vae_decoder_trace_model = torch.jit.trace( self.vae.decoder, vae_decoder_input_example, check_trace=False, strict=False ) vae_decoder_trace_model = torch.jit.freeze(vae_decoder_trace_model) self.vae.decoder.forward = vae_decoder_trace_model.forward
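# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the pipeline implementation above).
# It assumes this file is loaded as a diffusers community pipeline via
# `custom_pipeline`; the model id, dtype and resolution below are illustrative
# assumptions, not requirements of the class.
#
# import torch
# from diffusers import DiffusionPipeline
#
# pipe = DiffusionPipeline.from_pretrained(
#     "stabilityai/stable-diffusion-xl-base-1.0",
#     custom_pipeline="pipeline_stable_diffusion_xl_ipex",
#     torch_dtype=torch.bfloat16,
# )
#
# # Trace/optimize the UNet, VAE decoder and text encoder with IPEX first ...
# pipe.prepare_for_ipex(torch.bfloat16, prompt="a photo of an astronaut", height=1024, width=1024)
#
# # ... then run inference as usual at the same resolution used for tracing.
# image = pipe("a photo of an astronaut", num_inference_steps=20, height=1024, width=1024).images[0]
# image.save("astronaut.png")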
diffusers/examples/community/pipeline_stable_diffusion_xl_ipex.py/0
{ "file_path": "diffusers/examples/community/pipeline_stable_diffusion_xl_ipex.py", "repo_id": "diffusers", "token_count": 32238 }
143
import types from typing import List, Optional, Tuple, Union import torch from transformers import CLIPTextModelWithProjection, CLIPTokenizer from transformers.models.clip.modeling_clip import CLIPTextModelOutput from diffusers.models import PriorTransformer from diffusers.pipelines import DiffusionPipeline, StableDiffusionImageVariationPipeline from diffusers.schedulers import UnCLIPScheduler from diffusers.utils import logging from diffusers.utils.torch_utils import randn_tensor logger = logging.get_logger(__name__) # pylint: disable=invalid-name def _encode_image(self, image, device, num_images_per_prompt, do_classifier_free_guidance): image = image.to(device=device) image_embeddings = image # take image as image_embeddings image_embeddings = image_embeddings.unsqueeze(1) # duplicate image embeddings for each generation per prompt, using mps friendly method bs_embed, seq_len, _ = image_embeddings.shape image_embeddings = image_embeddings.repeat(1, num_images_per_prompt, 1) image_embeddings = image_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1) if do_classifier_free_guidance: uncond_embeddings = torch.zeros_like(image_embeddings) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes image_embeddings = torch.cat([uncond_embeddings, image_embeddings]) return image_embeddings class StableUnCLIPPipeline(DiffusionPipeline): def __init__( self, prior: PriorTransformer, tokenizer: CLIPTokenizer, text_encoder: CLIPTextModelWithProjection, prior_scheduler: UnCLIPScheduler, decoder_pipe_kwargs: Optional[dict] = None, ): super().__init__() decoder_pipe_kwargs = {"image_encoder": None} if decoder_pipe_kwargs is None else decoder_pipe_kwargs decoder_pipe_kwargs["torch_dtype"] = decoder_pipe_kwargs.get("torch_dtype", None) or prior.dtype self.decoder_pipe = StableDiffusionImageVariationPipeline.from_pretrained( "lambdalabs/sd-image-variations-diffusers", **decoder_pipe_kwargs ) # replace `_encode_image` method self.decoder_pipe._encode_image = types.MethodType(_encode_image, self.decoder_pipe) self.register_modules( prior=prior, tokenizer=tokenizer, text_encoder=text_encoder, prior_scheduler=prior_scheduler, ) def _encode_prompt( self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, text_model_output: Optional[Union[CLIPTextModelOutput, Tuple]] = None, text_attention_mask: Optional[torch.Tensor] = None, ): if text_model_output is None: batch_size = len(prompt) if isinstance(prompt, list) else 1 # get prompt text embeddings text_inputs = self.tokenizer( prompt, padding="max_length", max_length=self.tokenizer.model_max_length, return_tensors="pt", ) text_input_ids = text_inputs.input_ids text_mask = text_inputs.attention_mask.bool().to(device) if text_input_ids.shape[-1] > self.tokenizer.model_max_length: removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :]) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {self.tokenizer.model_max_length} tokens: {removed_text}" ) text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length] text_encoder_output = self.text_encoder(text_input_ids.to(device)) text_embeddings = text_encoder_output.text_embeds text_encoder_hidden_states = text_encoder_output.last_hidden_state else: batch_size = text_model_output[0].shape[0] text_embeddings, text_encoder_hidden_states = text_model_output[0], 
text_model_output[1] text_mask = text_attention_mask text_embeddings = text_embeddings.repeat_interleave(num_images_per_prompt, dim=0) text_encoder_hidden_states = text_encoder_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) text_mask = text_mask.repeat_interleave(num_images_per_prompt, dim=0) if do_classifier_free_guidance: uncond_tokens = [""] * batch_size uncond_input = self.tokenizer( uncond_tokens, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) uncond_text_mask = uncond_input.attention_mask.bool().to(device) uncond_embeddings_text_encoder_output = self.text_encoder(uncond_input.input_ids.to(device)) uncond_embeddings = uncond_embeddings_text_encoder_output.text_embeds uncond_text_encoder_hidden_states = uncond_embeddings_text_encoder_output.last_hidden_state # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = uncond_embeddings.shape[1] uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt) uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len) seq_len = uncond_text_encoder_hidden_states.shape[1] uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.repeat(1, num_images_per_prompt, 1) uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.view( batch_size * num_images_per_prompt, seq_len, -1 ) uncond_text_mask = uncond_text_mask.repeat_interleave(num_images_per_prompt, dim=0) # done duplicates # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes text_embeddings = torch.cat([uncond_embeddings, text_embeddings]) text_encoder_hidden_states = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states]) text_mask = torch.cat([uncond_text_mask, text_mask]) return text_embeddings, text_encoder_hidden_states, text_mask @property def _execution_device(self): r""" Returns the device on which the pipeline's models will be executed. After calling `pipeline.enable_sequential_cpu_offload()` the execution device can only be inferred from Accelerate's module hooks. 
""" if self.device != torch.device("meta") or not hasattr(self.prior, "_hf_hook"): return self.device for module in self.prior.modules(): if ( hasattr(module, "_hf_hook") and hasattr(module._hf_hook, "execution_device") and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device) return self.device def prepare_latents(self, shape, dtype, device, generator, latents, scheduler): if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: if latents.shape != shape: raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") latents = latents.to(device) latents = latents * scheduler.init_noise_sigma return latents def to(self, torch_device: Optional[Union[str, torch.device]] = None): self.decoder_pipe.to(torch_device) super().to(torch_device) @torch.no_grad() def __call__( self, prompt: Optional[Union[str, List[str]]] = None, height: Optional[int] = None, width: Optional[int] = None, num_images_per_prompt: int = 1, prior_num_inference_steps: int = 25, generator: Optional[torch.Generator] = None, prior_latents: Optional[torch.Tensor] = None, text_model_output: Optional[Union[CLIPTextModelOutput, Tuple]] = None, text_attention_mask: Optional[torch.Tensor] = None, prior_guidance_scale: float = 4.0, decoder_guidance_scale: float = 8.0, decoder_num_inference_steps: int = 50, decoder_num_images_per_prompt: Optional[int] = 1, decoder_eta: float = 0.0, output_type: Optional[str] = "pil", return_dict: bool = True, ): if prompt is not None: if isinstance(prompt, str): batch_size = 1 elif isinstance(prompt, list): batch_size = len(prompt) else: raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") else: batch_size = text_model_output[0].shape[0] device = self._execution_device batch_size = batch_size * num_images_per_prompt do_classifier_free_guidance = prior_guidance_scale > 1.0 or decoder_guidance_scale > 1.0 text_embeddings, text_encoder_hidden_states, text_mask = self._encode_prompt( prompt, device, num_images_per_prompt, do_classifier_free_guidance, text_model_output, text_attention_mask ) # prior self.prior_scheduler.set_timesteps(prior_num_inference_steps, device=device) prior_timesteps_tensor = self.prior_scheduler.timesteps embedding_dim = self.prior.config.embedding_dim prior_latents = self.prepare_latents( (batch_size, embedding_dim), text_embeddings.dtype, device, generator, prior_latents, self.prior_scheduler, ) for i, t in enumerate(self.progress_bar(prior_timesteps_tensor)): # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([prior_latents] * 2) if do_classifier_free_guidance else prior_latents predicted_image_embedding = self.prior( latent_model_input, timestep=t, proj_embedding=text_embeddings, encoder_hidden_states=text_encoder_hidden_states, attention_mask=text_mask, ).predicted_image_embedding if do_classifier_free_guidance: predicted_image_embedding_uncond, predicted_image_embedding_text = predicted_image_embedding.chunk(2) predicted_image_embedding = predicted_image_embedding_uncond + prior_guidance_scale * ( predicted_image_embedding_text - predicted_image_embedding_uncond ) if i + 1 == prior_timesteps_tensor.shape[0]: prev_timestep = None else: prev_timestep = prior_timesteps_tensor[i + 1] prior_latents = self.prior_scheduler.step( predicted_image_embedding, timestep=t, sample=prior_latents, generator=generator, prev_timestep=prev_timestep, ).prev_sample prior_latents = 
self.prior.post_process_latents(prior_latents) image_embeddings = prior_latents output = self.decoder_pipe( image=image_embeddings, height=height, width=width, num_inference_steps=decoder_num_inference_steps, guidance_scale=decoder_guidance_scale, generator=generator, output_type=output_type, return_dict=return_dict, num_images_per_prompt=decoder_num_images_per_prompt, eta=decoder_eta, ) return output
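# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the pipeline implementation above).
# The prior components are assumed to come from the "kakaobrain/karlo-v1-alpha"
# repository and its subfolder layout; treat the repo id and subfolder names as
# assumptions to verify against your own checkpoint.
#
# from diffusers import PriorTransformer
# from diffusers.schedulers import UnCLIPScheduler
# from transformers import CLIPTextModelWithProjection, CLIPTokenizer
#
# prior = PriorTransformer.from_pretrained("kakaobrain/karlo-v1-alpha", subfolder="prior")
# tokenizer = CLIPTokenizer.from_pretrained("kakaobrain/karlo-v1-alpha", subfolder="tokenizer")
# text_encoder = CLIPTextModelWithProjection.from_pretrained("kakaobrain/karlo-v1-alpha", subfolder="text_encoder")
# prior_scheduler = UnCLIPScheduler.from_pretrained("kakaobrain/karlo-v1-alpha", subfolder="prior_scheduler")
#
# pipe = StableUnCLIPPipeline(
#     prior=prior,
#     tokenizer=tokenizer,
#     text_encoder=text_encoder,
#     prior_scheduler=prior_scheduler,
# )
# pipe.to("cuda")
#
# image = pipe("a shiba inu wearing a beret and black turtleneck", prior_num_inference_steps=25).images[0]
# image.save("stable_unclip_sample.png")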
diffusers/examples/community/stable_unclip.py/0
{ "file_path": "diffusers/examples/community/stable_unclip.py", "repo_id": "diffusers", "token_count": 5488 }
144
# ControlNet training example [Adding Conditional Control to Text-to-Image Diffusion Models](https://huggingface.co/papers/2302.05543) by Lvmin Zhang and Maneesh Agrawala. This example is based on the [training example in the original ControlNet repository](https://github.com/lllyasviel/ControlNet/blob/main/docs/train.md). It trains a ControlNet to fill circles using a [small synthetic dataset](https://huggingface.co/datasets/fusing/fill50k). ## Installing the dependencies Before running the scripts, make sure to install the library's training dependencies: **Important** To make sure you can successfully run the latest versions of the example scripts, we highly recommend **installing from source** and keeping the install up to date as we update the example scripts frequently and install some example-specific requirements. To do this, execute the following steps in a new virtual environment: ```bash git clone https://github.com/huggingface/diffusers cd diffusers pip install -e . ``` Then cd in the example folder and run ```bash pip install -r requirements.txt ``` And initialize an [🤗Accelerate](https://github.com/huggingface/accelerate/) environment with: ```bash accelerate config ``` Or for a default accelerate configuration without answering questions about your environment ```bash accelerate config default ``` Or if your environment doesn't support an interactive shell e.g. a notebook ```python from accelerate.utils import write_basic_config write_basic_config() ``` ## Circle filling dataset The original dataset is hosted in the [ControlNet repo](https://huggingface.co/lllyasviel/ControlNet/blob/main/training/fill50k.zip). We re-uploaded it to be compatible with `datasets` [here](https://huggingface.co/datasets/fusing/fill50k). Note that `datasets` handles dataloading within the training script. Our training examples use [Stable Diffusion 1.5](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5) as the original set of ControlNet models were trained from it. However, ControlNet can be trained to augment any Stable Diffusion compatible model (such as [CompVis/stable-diffusion-v1-4](https://huggingface.co/CompVis/stable-diffusion-v1-4)) or [stabilityai/stable-diffusion-2-1](https://huggingface.co/stabilityai/stable-diffusion-2-1). ## Training Our training examples use two test conditioning images. They can be downloaded by running ```sh wget https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_training/conditioning_image_1.png wget https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_training/conditioning_image_2.png ``` ```bash export MODEL_DIR="stable-diffusion-v1-5/stable-diffusion-v1-5" export OUTPUT_DIR="path to save model" accelerate launch train_controlnet.py \ --pretrained_model_name_or_path=$MODEL_DIR \ --output_dir=$OUTPUT_DIR \ --dataset_name=fusing/fill50k \ --resolution=512 \ --learning_rate=1e-5 \ --validation_image "./conditioning_image_1.png" "./conditioning_image_2.png" \ --validation_prompt "red circle with blue background" "cyan circle with brown floral background" \ --train_batch_size=4 ``` This default configuration requires ~38GB VRAM. By default, the training script logs outputs to tensorboard. Pass `--report_to wandb` to use weights and biases. Gradient accumulation with a smaller batch size can be used to reduce training requirements to ~20 GB VRAM. 
```bash export MODEL_DIR="stable-diffusion-v1-5/stable-diffusion-v1-5" export OUTPUT_DIR="path to save model" accelerate launch train_controlnet.py \ --pretrained_model_name_or_path=$MODEL_DIR \ --output_dir=$OUTPUT_DIR \ --dataset_name=fusing/fill50k \ --resolution=512 \ --learning_rate=1e-5 \ --validation_image "./conditioning_image_1.png" "./conditioning_image_2.png" \ --validation_prompt "red circle with blue background" "cyan circle with brown floral background" \ --train_batch_size=1 \ --gradient_accumulation_steps=4 ``` ## Training with multiple GPUs `accelerate` allows for seamless multi-GPU training. Follow the instructions [here](https://huggingface.co/docs/accelerate/basic_tutorials/launch) for running distributed training with `accelerate`. Here is an example command: ```bash export MODEL_DIR="stable-diffusion-v1-5/stable-diffusion-v1-5" export OUTPUT_DIR="path to save model" accelerate launch --mixed_precision="fp16" --multi_gpu train_controlnet.py \ --pretrained_model_name_or_path=$MODEL_DIR \ --output_dir=$OUTPUT_DIR \ --dataset_name=fusing/fill50k \ --resolution=512 \ --learning_rate=1e-5 \ --validation_image "./conditioning_image_1.png" "./conditioning_image_2.png" \ --validation_prompt "red circle with blue background" "cyan circle with brown floral background" \ --train_batch_size=4 \ --mixed_precision="fp16" \ --tracker_project_name="controlnet-demo" \ --report_to=wandb ``` ## Example results #### After 300 steps with batch size 8 | | | |-------------------|:-------------------------:| | | red circle with blue background | ![conditioning image](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_training/conditioning_image_1.png) | ![red circle with blue background](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_training/red_circle_with_blue_background_300_steps.png) | | | cyan circle with brown floral background | ![conditioning image](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_training/conditioning_image_2.png) | ![cyan circle with brown floral background](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_training/cyan_circle_with_brown_floral_background_300_steps.png) | #### After 6000 steps with batch size 8: | | | |-------------------|:-------------------------:| | | red circle with blue background | ![conditioning image](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_training/conditioning_image_1.png) | ![red circle with blue background](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_training/red_circle_with_blue_background_6000_steps.png) | | | cyan circle with brown floral background | ![conditioning image](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_training/conditioning_image_2.png) | ![cyan circle with brown floral background](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_training/cyan_circle_with_brown_floral_background_6000_steps.png) | ## Training on a 16 GB GPU Optimizations: - Gradient checkpointing - bitsandbyte's 8-bit optimizer [bitandbytes install instructions](https://github.com/TimDettmers/bitsandbytes#requirements--installation). 
```bash export MODEL_DIR="stable-diffusion-v1-5/stable-diffusion-v1-5" export OUTPUT_DIR="path to save model" accelerate launch train_controlnet.py \ --pretrained_model_name_or_path=$MODEL_DIR \ --output_dir=$OUTPUT_DIR \ --dataset_name=fusing/fill50k \ --resolution=512 \ --learning_rate=1e-5 \ --validation_image "./conditioning_image_1.png" "./conditioning_image_2.png" \ --validation_prompt "red circle with blue background" "cyan circle with brown floral background" \ --train_batch_size=1 \ --gradient_accumulation_steps=4 \ --gradient_checkpointing \ --use_8bit_adam ``` ## Training on a 12 GB GPU Optimizations: - Gradient checkpointing - bitsandbyte's 8-bit optimizer - xformers - set grads to none ```bash export MODEL_DIR="stable-diffusion-v1-5/stable-diffusion-v1-5" export OUTPUT_DIR="path to save model" accelerate launch train_controlnet.py \ --pretrained_model_name_or_path=$MODEL_DIR \ --output_dir=$OUTPUT_DIR \ --dataset_name=fusing/fill50k \ --resolution=512 \ --learning_rate=1e-5 \ --validation_image "./conditioning_image_1.png" "./conditioning_image_2.png" \ --validation_prompt "red circle with blue background" "cyan circle with brown floral background" \ --train_batch_size=1 \ --gradient_accumulation_steps=4 \ --gradient_checkpointing \ --use_8bit_adam \ --enable_xformers_memory_efficient_attention \ --set_grads_to_none ``` When using `enable_xformers_memory_efficient_attention`, please make sure to install `xformers` by `pip install xformers`. ## Training on an 8 GB GPU We have not exhaustively tested DeepSpeed support for ControlNet. While the configuration does save memory, we have not confirmed the configuration to train successfully. You will very likely have to make changes to the config to have a successful training run. Optimizations: - Gradient checkpointing - xformers - set grads to none - DeepSpeed stage 2 with parameter and optimizer offloading - fp16 mixed precision [DeepSpeed](https://www.deepspeed.ai/) can offload tensors from VRAM to either CPU or NVME. This requires significantly more RAM (about 25 GB). Use `accelerate config` to enable DeepSpeed stage 2. The relevant parts of the resulting accelerate config file are ```yaml compute_environment: LOCAL_MACHINE deepspeed_config: gradient_accumulation_steps: 4 offload_optimizer_device: cpu offload_param_device: cpu zero3_init_flag: false zero_stage: 2 distributed_type: DEEPSPEED ``` See [documentation](https://huggingface.co/docs/accelerate/usage_guides/deepspeed) for more DeepSpeed configuration options. Changing the default Adam optimizer to DeepSpeed's Adam `deepspeed.ops.adam.DeepSpeedCPUAdam` gives a substantial speedup but it requires CUDA toolchain with the same version as pytorch. 8-bit optimizer does not seem to be compatible with DeepSpeed at the moment. 
```bash export MODEL_DIR="stable-diffusion-v1-5/stable-diffusion-v1-5" export OUTPUT_DIR="path to save model" accelerate launch train_controlnet.py \ --pretrained_model_name_or_path=$MODEL_DIR \ --output_dir=$OUTPUT_DIR \ --dataset_name=fusing/fill50k \ --resolution=512 \ --validation_image "./conditioning_image_1.png" "./conditioning_image_2.png" \ --validation_prompt "red circle with blue background" "cyan circle with brown floral background" \ --train_batch_size=1 \ --gradient_accumulation_steps=4 \ --gradient_checkpointing \ --enable_xformers_memory_efficient_attention \ --set_grads_to_none \ --mixed_precision fp16 ``` ## Performing inference with the trained ControlNet The trained model can be run the same as the original ControlNet pipeline with the newly trained ControlNet. Set `base_model_path` and `controlnet_path` to the values `--pretrained_model_name_or_path` and `--output_dir` were respectively set to in the training script. ```py from diffusers import StableDiffusionControlNetPipeline, ControlNetModel, UniPCMultistepScheduler from diffusers.utils import load_image import torch base_model_path = "path to model" controlnet_path = "path to controlnet" controlnet = ControlNetModel.from_pretrained(controlnet_path, torch_dtype=torch.float16) pipe = StableDiffusionControlNetPipeline.from_pretrained( base_model_path, controlnet=controlnet, torch_dtype=torch.float16 ) # speed up diffusion process with faster scheduler and memory optimization pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config) # remove following line if xformers is not installed or when using Torch 2.0. pipe.enable_xformers_memory_efficient_attention() # memory optimization. pipe.enable_model_cpu_offload() control_image = load_image("./conditioning_image_1.png") prompt = "pale golden rod circle with old lace background" # generate image generator = torch.manual_seed(0) image = pipe( prompt, num_inference_steps=20, generator=generator, image=control_image ).images[0] image.save("./output.png") ``` ## Training with Flax/JAX For faster training on TPUs and GPUs you can leverage the flax training example. Follow the instructions above to get the model and dataset before running the script. ### Running on Google Cloud TPU See below for commands to set up a TPU VM(`--accelerator-type v4-8`). For more details about how to set up and use TPUs, refer to [Cloud docs for single VM setup](https://cloud.google.com/tpu/docs/run-calculation-jax). First create a single TPUv4-8 VM and connect to it: ``` ZONE=us-central2-b TPU_TYPE=v4-8 VM_NAME=hg_flax gcloud alpha compute tpus tpu-vm create $VM_NAME \ --zone $ZONE \ --accelerator-type $TPU_TYPE \ --version tpu-vm-v4-base gcloud alpha compute tpus tpu-vm ssh $VM_NAME --zone $ZONE -- \ ``` When connected install JAX `0.4.5`: ```sh pip install "jax[tpu]==0.4.5" -f https://storage.googleapis.com/jax-releases/libtpu_releases.html ``` To verify that JAX was correctly installed, you can run the following command: ```py import jax jax.device_count() ``` This should display the number of TPU cores, which should be 4 on a TPUv4-8 VM. Then install Diffusers and the library's training dependencies: ```bash git clone https://github.com/huggingface/diffusers cd diffusers pip install . 
``` Then cd in the example folder and run ```bash pip install -U -r requirements_flax.txt ``` If you want to use Weights and Biases logging, you should also install `wandb` now ```bash pip install wandb ``` Now let's downloading two conditioning images that we will use to run validation during the training in order to track our progress ```sh wget https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_training/conditioning_image_1.png wget https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_training/conditioning_image_2.png ``` We encourage you to store or share your model with the community. To use huggingface hub, please login to your Hugging Face account, or ([create one](https://huggingface.co/docs/diffusers/main/en/training/hf.co/join) if you don’t have one already): ```sh hf auth login ``` Make sure you have the `MODEL_DIR`,`OUTPUT_DIR` and `HUB_MODEL_ID` environment variables set. The `OUTPUT_DIR` and `HUB_MODEL_ID` variables specify where to save the model to on the Hub: ```bash export MODEL_DIR="stable-diffusion-v1-5/stable-diffusion-v1-5" export OUTPUT_DIR="runs/fill-circle-{timestamp}" export HUB_MODEL_ID="controlnet-fill-circle" ``` And finally start the training ```bash python3 train_controlnet_flax.py \ --pretrained_model_name_or_path=$MODEL_DIR \ --output_dir=$OUTPUT_DIR \ --dataset_name=fusing/fill50k \ --resolution=512 \ --learning_rate=1e-5 \ --validation_image "./conditioning_image_1.png" "./conditioning_image_2.png" \ --validation_prompt "red circle with blue background" "cyan circle with brown floral background" \ --validation_steps=1000 \ --train_batch_size=2 \ --revision="non-ema" \ --from_pt \ --report_to="wandb" \ --tracker_project_name=$HUB_MODEL_ID \ --num_train_epochs=11 \ --push_to_hub \ --hub_model_id=$HUB_MODEL_ID ``` Since we passed the `--push_to_hub` flag, it will automatically create a model repo under your huggingface account based on `$HUB_MODEL_ID`. By the end of training, the final checkpoint will be automatically stored on the hub. You can find an example model repo [here](https://huggingface.co/YiYiXu/fill-circle-controlnet). Our training script also provides limited support for streaming large datasets from the Hugging Face Hub. In order to enable streaming, one must also set `--max_train_samples`. Here is an example command (from [this blog article](https://huggingface.co/blog/train-your-controlnet)): ```bash export MODEL_DIR="stable-diffusion-v1-5/stable-diffusion-v1-5" export OUTPUT_DIR="runs/uncanny-faces-{timestamp}" export HUB_MODEL_ID="controlnet-uncanny-faces" python3 train_controlnet_flax.py \ --pretrained_model_name_or_path=$MODEL_DIR \ --output_dir=$OUTPUT_DIR \ --dataset_name=multimodalart/facesyntheticsspigacaptioned \ --streaming \ --conditioning_image_column=spiga_seg \ --image_column=image \ --caption_column=image_caption \ --resolution=512 \ --max_train_samples 100000 \ --learning_rate=1e-5 \ --train_batch_size=1 \ --revision="flax" \ --report_to="wandb" \ --tracker_project_name=$HUB_MODEL_ID ``` Note, however, that the performance of the TPUs might get bottlenecked as streaming with `datasets` is not optimized for images. 
To ensure maximum throughput, we encourage you to explore the following options:

* [Webdataset](https://webdataset.github.io/webdataset/)
* [TorchData](https://github.com/pytorch/data)
* [TensorFlow Datasets](https://www.tensorflow.org/datasets/tfless_tfds)

When working with a larger dataset, you may need to run the training process for a long time, and it's useful to save regular checkpoints during the process. You can use the following argument to enable intermediate checkpointing:

```bash
 --checkpointing_steps=500
```

This will save the trained model in subfolders of your output_dir. Subfolder names are the number of steps performed so far; for example, a checkpoint saved after 500 training steps would be saved in a subfolder named 500.

You can then start your training from this saved checkpoint with

```bash
 --controlnet_model_name_or_path="./control_out/500"
```

We support training with the Min-SNR weighting strategy proposed in [Efficient Diffusion Training via Min-SNR Weighting Strategy](https://huggingface.co/papers/2303.09556), which helps to achieve faster convergence by rebalancing the loss. To use it, set the `--snr_gamma` argument; the recommended value is `5.0` (see the example command at the end of this README).

We also support gradient accumulation - a technique that lets you use a bigger effective batch size than your machine would normally be able to fit into memory. You can use the `gradient_accumulation_steps` argument to set the number of gradient accumulation steps. The ControlNet author recommends using gradient accumulation to achieve better convergence. Read more [here](https://github.com/lllyasviel/ControlNet/blob/main/docs/train.md#more-consideration-sudden-converge-phenomenon-and-gradient-accumulation).

You can **profile your code** with:

```bash
 --profile_steps=5
```

Refer to the [JAX documentation on profiling](https://jax.readthedocs.io/en/latest/profiling.html). To inspect the profile trace, you'll have to install and start Tensorboard with the profile plugin:

```bash
pip install tensorflow tensorboard-plugin-profile
tensorboard --logdir runs/fill-circle-100steps-20230411_165612/
```

The profile can then be inspected at http://localhost:6006/#profile

Sometimes you'll get version conflicts (error messages like `Duplicate plugins for name projector`), which means that you have to uninstall and reinstall all versions of Tensorflow/Tensorboard (e.g. with `pip uninstall tensorflow tf-nightly tensorboard tb-nightly tensorboard-plugin-profile && pip install tf-nightly tbp-nightly tensorboard-plugin-profile`).

Note that the debugging functionality of the Tensorboard `profile` plugin is still under active development. Not all views are fully functional, and, for example, the `trace_viewer` cuts off events after 1M (which can result in all your device traces getting lost if you, for example, profile the compilation step by accident).

## Support for Stable Diffusion XL

We provide a training script for training a ControlNet with [Stable Diffusion XL](https://huggingface.co/papers/2307.01952). Please refer to [README_sdxl.md](./README_sdxl.md) for more details.
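As an example of the Min-SNR weighting mentioned above, here is a sketch of the earlier Flax training command with `--snr_gamma` enabled. All paths, the dataset name and the remaining flags are simply the values used in the examples above and are not specific to Min-SNR; adjust them to your setup.

```bash
export MODEL_DIR="stable-diffusion-v1-5/stable-diffusion-v1-5"
export OUTPUT_DIR="runs/fill-circle-{timestamp}"

python3 train_controlnet_flax.py \
 --pretrained_model_name_or_path=$MODEL_DIR \
 --output_dir=$OUTPUT_DIR \
 --dataset_name=fusing/fill50k \
 --resolution=512 \
 --learning_rate=1e-5 \
 --validation_image "./conditioning_image_1.png" "./conditioning_image_2.png" \
 --validation_prompt "red circle with blue background" "cyan circle with brown floral background" \
 --train_batch_size=2 \
 --revision="non-ema" \
 --from_pt \
 --snr_gamma=5.0
```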
diffusers/examples/controlnet/README.md/0
{ "file_path": "diffusers/examples/controlnet/README.md", "repo_id": "diffusers", "token_count": 6085 }
145
# Search models on Civitai and Hugging Face The [auto_diffusers](https://github.com/suzukimain/auto_diffusers) library provides additional functionalities to Diffusers such as searching for models on Civitai and the Hugging Face Hub. Please refer to the original library [here](https://pypi.org/project/auto-diffusers/) ## Installation Before running the scripts, make sure to install the library's training dependencies: > [!IMPORTANT] > To make sure you can successfully run the latest versions of the example scripts, we highly recommend **installing from source** and keeping the installation up to date as we update the example scripts frequently and install some example-specific requirements. To do this, execute the following steps in a new virtual environment. ```bash git clone https://github.com/huggingface/diffusers cd diffusers pip install . ``` Set up the pipeline. You can also cd to this folder and run it. ```bash !wget https://raw.githubusercontent.com/suzukimain/auto_diffusers/refs/heads/master/src/auto_diffusers/pipeline_easy.py ``` ## Load from Civitai ```python from pipeline_easy import ( EasyPipelineForText2Image, EasyPipelineForImage2Image, EasyPipelineForInpainting, ) # Text-to-Image pipeline = EasyPipelineForText2Image.from_civitai( "search_word", base_model="SD 1.5", ).to("cuda") # Image-to-Image pipeline = EasyPipelineForImage2Image.from_civitai( "search_word", base_model="SD 1.5", ).to("cuda") # Inpainting pipeline = EasyPipelineForInpainting.from_civitai( "search_word", base_model="SD 1.5", ).to("cuda") ``` ## Load from Hugging Face ```python from pipeline_easy import ( EasyPipelineForText2Image, EasyPipelineForImage2Image, EasyPipelineForInpainting, ) # Text-to-Image pipeline = EasyPipelineForText2Image.from_huggingface( "search_word", checkpoint_format="diffusers", ).to("cuda") # Image-to-Image pipeline = EasyPipelineForImage2Image.from_huggingface( "search_word", checkpoint_format="diffusers", ).to("cuda") # Inpainting pipeline = EasyPipelineForInpainting.from_huggingface( "search_word", checkpoint_format="diffusers", ).to("cuda") ``` ## Search Civitai and Huggingface ```python # Load Lora into the pipeline. pipeline.auto_load_lora_weights("Detail Tweaker") # Load TextualInversion into the pipeline. pipeline.auto_load_textual_inversion("EasyNegative", token="EasyNegative") ``` ### Search Civitai > [!TIP] > **If an error occurs, insert the `token` and run again.** #### `EasyPipeline.from_civitai` parameters | Name | Type | Default | Description | |:---------------:|:----------------------:|:-------------:|:-----------------------------------------------------------------------------------:| | search_word | string, Path | ー | The search query string. Can be a keyword, Civitai URL, local directory or file path. | | model_type | string | `Checkpoint` | The type of model to search for. <br>(for example `Checkpoint`, `TextualInversion`, `Controlnet`, `LORA`, `Hypernetwork`, `AestheticGradient`, `Poses`) | | base_model | string | None | Trained model tag (for example `SD 1.5`, `SD 3.5`, `SDXL 1.0`) | | torch_dtype | string, torch.dtype | None | Override the default `torch.dtype` and load the model with another dtype. | | force_download | bool | False | Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist. | | cache_dir | string, Path | None | Path to the folder where cached files are stored. | | resume | bool | False | Whether to resume an incomplete download. 
| | token | string | None | API token for Civitai authentication. | #### `search_civitai` parameters | Name | Type | Default | Description | |:---------------:|:--------------:|:-------------:|:-----------------------------------------------------------------------------------:| | search_word | string, Path | ー | The search query string. Can be a keyword, Civitai URL, local directory or file path. | | model_type | string | `Checkpoint` | The type of model to search for. <br>(for example `Checkpoint`, `TextualInversion`, `Controlnet`, `LORA`, `Hypernetwork`, `AestheticGradient`, `Poses`) | | base_model | string | None | Trained model tag (for example `SD 1.5`, `SD 3.5`, `SDXL 1.0`) | | download | bool | False | Whether to download the model. | | force_download | bool | False | Whether to force the download if the model already exists. | | cache_dir | string, Path | None | Path to the folder where cached files are stored. | | resume | bool | False | Whether to resume an incomplete download. | | token | string | None | API token for Civitai authentication. | | include_params | bool | False | Whether to include parameters in the returned data. | | skip_error | bool | False | Whether to skip errors and return None. | ### Search Huggingface > [!TIP] > **If an error occurs, insert the `token` and run again.** #### `EasyPipeline.from_huggingface` parameters | Name | Type | Default | Description | |:---------------------:|:-------------------:|:--------------:|:----------------------------------------------------------------:| | search_word | string, Path | ー | The search query string. Can be a keyword, Hugging Face URL, local directory or file path, or a Hugging Face path (`<creator>/<repo>`). | | checkpoint_format | string | `single_file` | The format of the model checkpoint.<br>● `single_file` to search for `single file checkpoint` <br>●`diffusers` to search for `multifolder diffusers format checkpoint` | | torch_dtype | string, torch.dtype | None | Override the default `torch.dtype` and load the model with another dtype. | | force_download | bool | False | Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist. | | cache_dir | string, Path | None | Path to a directory where a downloaded pretrained model configuration is cached if the standard cache is not used. | | token | string, bool | None | The token to use as HTTP bearer authorization for remote files. | #### `search_huggingface` parameters | Name | Type | Default | Description | |:---------------------:|:-------------------:|:--------------:|:----------------------------------------------------------------:| | search_word | string, Path | ー | The search query string. Can be a keyword, Hugging Face URL, local directory or file path, or a Hugging Face path (`<creator>/<repo>`). | | checkpoint_format | string | `single_file` | The format of the model checkpoint. <br>● `single_file` to search for `single file checkpoint` <br>●`diffusers` to search for `multifolder diffusers format checkpoint` | | pipeline_tag | string | None | Tag to filter models by pipeline. | | download | bool | False | Whether to download the model. | | force_download | bool | False | Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist. | | cache_dir | string, Path | None | Path to a directory where a downloaded pretrained model configuration is cached if the standard cache is not used. 
| | token | string, bool | None | The token to use as HTTP bearer authorization for remote files. | | include_params | bool | False | Whether to include parameters in the returned data. | | skip_error | bool | False | Whether to skip errors and return None. |
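As a quick end-to-end illustration (a minimal sketch, not part of the original guide), the pipelines returned by the loaders above can be used like any other Diffusers text-to-image pipeline. The search keyword, prompt, and file name below are placeholders, and the generation call assumes the returned object follows the standard Diffusers pipeline interface.

```python
from pipeline_easy import EasyPipelineForText2Image

# Placeholder keyword; the best-matching Civitai checkpoint is searched and loaded.
pipeline = EasyPipelineForText2Image.from_civitai(
    "search_word",
    base_model="SD 1.5",
).to("cuda")

# Standard Diffusers text-to-image call on the returned pipeline.
image = pipeline(
    "a watercolor painting of a lighthouse at dawn",
    num_inference_steps=30,
    guidance_scale=7.5,
).images[0]
image.save("lighthouse.png")
```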
diffusers/examples/model_search/README.md/0
{ "file_path": "diffusers/examples/model_search/README.md", "repo_id": "diffusers", "token_count": 3696 }
146
import os import random import torch import torchvision.transforms as transforms from PIL import Image def recalculate_box_and_verify_if_valid(x, y, w, h, image_size, original_image_size, min_box_size): scale = image_size / min(original_image_size) crop_y = (original_image_size[1] * scale - image_size) // 2 crop_x = (original_image_size[0] * scale - image_size) // 2 x0 = max(x * scale - crop_x, 0) y0 = max(y * scale - crop_y, 0) x1 = min((x + w) * scale - crop_x, image_size) y1 = min((y + h) * scale - crop_y, image_size) if (x1 - x0) * (y1 - y0) / (image_size * image_size) < min_box_size: return False, (None, None, None, None) return True, (x0, y0, x1, y1) class COCODataset(torch.utils.data.Dataset): def __init__( self, data_path, image_path, image_size=512, min_box_size=0.01, max_boxes_per_data=8, tokenizer=None, ): super().__init__() self.min_box_size = min_box_size self.max_boxes_per_data = max_boxes_per_data self.image_size = image_size self.image_path = image_path self.tokenizer = tokenizer self.transforms = transforms.Compose( [ transforms.Resize(image_size, interpolation=transforms.InterpolationMode.BILINEAR), transforms.CenterCrop(image_size), transforms.ToTensor(), transforms.Normalize([0.5], [0.5]), ] ) self.data_list = torch.load(data_path, map_location="cpu") def __getitem__(self, index): if self.max_boxes_per_data > 99: assert False, "Are you sure setting such large number of boxes per image?" out = {} data = self.data_list[index] image = Image.open(os.path.join(self.image_path, data["file_path"])).convert("RGB") original_image_size = image.size out["pixel_values"] = self.transforms(image) annos = data["annos"] areas, valid_annos = [], [] for anno in annos: # x, y, w, h = anno['bbox'] x0, y0, x1, y1 = anno["bbox"] x, y, w, h = x0, y0, x1 - x0, y1 - y0 valid, (x0, y0, x1, y1) = recalculate_box_and_verify_if_valid( x, y, w, h, self.image_size, original_image_size, self.min_box_size ) if valid: anno["bbox"] = [x0, y0, x1, y1] areas.append((x1 - x0) * (y1 - y0)) valid_annos.append(anno) # Sort according to area and choose the largest N objects wanted_idxs = torch.tensor(areas).sort(descending=True)[1] wanted_idxs = wanted_idxs[: self.max_boxes_per_data] valid_annos = [valid_annos[i] for i in wanted_idxs] out["boxes"] = torch.zeros(self.max_boxes_per_data, 4) out["masks"] = torch.zeros(self.max_boxes_per_data) out["text_embeddings_before_projection"] = torch.zeros(self.max_boxes_per_data, 768) for i, anno in enumerate(valid_annos): out["boxes"][i] = torch.tensor(anno["bbox"]) / self.image_size out["masks"][i] = 1 out["text_embeddings_before_projection"][i] = anno["text_embeddings_before_projection"] prob_drop_boxes = 0.1 if random.random() < prob_drop_boxes: out["masks"][:] = 0 caption = random.choice(data["captions"]) prob_drop_captions = 0.5 if random.random() < prob_drop_captions: caption = "" caption = self.tokenizer( caption, max_length=self.tokenizer.model_max_length, padding="max_length", truncation=True, return_tensors="pt", ) out["caption"] = caption return out def __len__(self): return len(self.data_list)
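# ---------------------------------------------------------------------------
# Editor's usage sketch (not part of the original file): how this dataset is
# typically instantiated before being wrapped in a DataLoader. The tokenizer
# checkpoint and data paths are placeholders, and the .pth file is assumed to
# contain the preprocessed annotation list (including per-box
# "text_embeddings_before_projection") that COCODataset expects.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from transformers import CLIPTokenizer

    tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
    demo_dataset = COCODataset(
        data_path="data/coco_train_annotations.pth",  # hypothetical preprocessed annotations
        image_path="data/coco/train2017",  # hypothetical image root
        image_size=512,
        max_boxes_per_data=8,
        tokenizer=tokenizer,
    )
    sample = demo_dataset[0]
    print(sample["pixel_values"].shape)  # torch.Size([3, 512, 512])
    print(sample["boxes"].shape)  # torch.Size([8, 4]); boxes are normalized xyxy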
diffusers/examples/research_projects/gligen/dataset.py/0
{ "file_path": "diffusers/examples/research_projects/gligen/dataset.py", "repo_id": "diffusers", "token_count": 1897 }
147
import argparse import itertools import math import os import random from pathlib import Path from typing import Iterable import numpy as np import PIL import torch import torch.nn.functional as F import torch.utils.checkpoint from accelerate import Accelerator from accelerate.utils import ProjectConfiguration, set_seed from huggingface_hub import create_repo, upload_folder from neural_compressor.utils import logger from packaging import version from PIL import Image from torch.utils.data import Dataset from torchvision import transforms from tqdm.auto import tqdm from transformers import CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDPMScheduler, StableDiffusionPipeline, UNet2DConditionModel from diffusers.optimization import get_scheduler from diffusers.utils import make_image_grid if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"): PIL_INTERPOLATION = { "linear": PIL.Image.Resampling.BILINEAR, "bilinear": PIL.Image.Resampling.BILINEAR, "bicubic": PIL.Image.Resampling.BICUBIC, "lanczos": PIL.Image.Resampling.LANCZOS, "nearest": PIL.Image.Resampling.NEAREST, } else: PIL_INTERPOLATION = { "linear": PIL.Image.LINEAR, "bilinear": PIL.Image.BILINEAR, "bicubic": PIL.Image.BICUBIC, "lanczos": PIL.Image.LANCZOS, "nearest": PIL.Image.NEAREST, } # ------------------------------------------------------------------------------ def save_progress(text_encoder, placeholder_token_id, accelerator, args, save_path): logger.info("Saving embeddings") learned_embeds = accelerator.unwrap_model(text_encoder).get_input_embeddings().weight[placeholder_token_id] learned_embeds_dict = {args.placeholder_token: learned_embeds.detach().cpu()} torch.save(learned_embeds_dict, save_path) def parse_args(): parser = argparse.ArgumentParser(description="Example of distillation for quantization on Textual Inversion.") parser.add_argument( "--save_steps", type=int, default=500, help="Save learned_embeds.bin every X updates steps.", ) parser.add_argument( "--pretrained_model_name_or_path", type=str, default=None, required=True, help="Path to pretrained model or model identifier from huggingface.co/models.", ) parser.add_argument( "--revision", type=str, default=None, required=False, help="Revision of pretrained model identifier from huggingface.co/models.", ) parser.add_argument( "--tokenizer_name", type=str, default=None, help="Pretrained tokenizer name or path if not the same as model_name", ) parser.add_argument( "--train_data_dir", type=str, default=None, required=True, help="A folder containing the training data." ) parser.add_argument( "--placeholder_token", type=str, default=None, required=True, help="A token to use as a placeholder for the concept.", ) parser.add_argument( "--initializer_token", type=str, default=None, required=True, help="A token to use as initializer word." 
) parser.add_argument("--learnable_property", type=str, default="object", help="Choose between 'object' and 'style'") parser.add_argument("--repeats", type=int, default=100, help="How many times to repeat the training data.") parser.add_argument( "--output_dir", type=str, default="text-inversion-model", help="The output directory where the model predictions and checkpoints will be written.", ) parser.add_argument( "--cache_dir", type=str, default=None, help="The directory where the downloaded models and datasets will be stored.", ) parser.add_argument("--seed", type=int, default=42, help="A seed for reproducible training.") parser.add_argument( "--resolution", type=int, default=512, help=( "The resolution for input images, all the images in the train/validation dataset will be resized to this" " resolution" ), ) parser.add_argument( "--center_crop", action="store_true", help="Whether to center crop images before resizing to resolution" ) parser.add_argument( "--train_batch_size", type=int, default=16, help="Batch size (per device) for the training dataloader." ) parser.add_argument("--num_train_epochs", type=int, default=100) parser.add_argument( "--max_train_steps", type=int, default=5000, help="Total number of training steps to perform. If provided, overrides num_train_epochs.", ) parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument( "--learning_rate", type=float, default=1e-4, help="Initial learning rate (after the potential warmup period) to use.", ) parser.add_argument( "--scale_lr", action="store_true", default=False, help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.", ) parser.add_argument( "--lr_scheduler", type=str, default="constant", help=( 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",' ' "constant", "constant_with_warmup"]' ), ) parser.add_argument( "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler." ) parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.") parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.") parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.") parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer") parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.") parser.add_argument( "--hub_model_id", type=str, default=None, help="The name of the repository to keep in sync with the local `output_dir`.", ) parser.add_argument( "--logging_dir", type=str, default="logs", help=( "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to" " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***." ), ) parser.add_argument( "--mixed_precision", type=str, default="no", choices=["no", "fp16", "bf16"], help=( "Whether to use mixed precision. Choose" "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10." "and an Nvidia Ampere GPU." 
), ) parser.add_argument("--use_ema", action="store_true", help="Whether to use EMA model.") parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") parser.add_argument("--do_quantization", action="store_true", help="Whether or not to do quantization.") parser.add_argument("--do_distillation", action="store_true", help="Whether or not to do distillation.") parser.add_argument( "--verify_loading", action="store_true", help="Whether or not to verify the loading of the quantized model." ) parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") args = parser.parse_args() env_local_rank = int(os.environ.get("LOCAL_RANK", -1)) if env_local_rank != -1 and env_local_rank != args.local_rank: args.local_rank = env_local_rank if args.train_data_dir is None: raise ValueError("You must specify a train data directory.") return args imagenet_templates_small = [ "a photo of a {}", "a rendering of a {}", "a cropped photo of the {}", "the photo of a {}", "a photo of a clean {}", "a photo of a dirty {}", "a dark photo of the {}", "a photo of my {}", "a photo of the cool {}", "a close-up photo of a {}", "a bright photo of the {}", "a cropped photo of a {}", "a photo of the {}", "a good photo of the {}", "a photo of one {}", "a close-up photo of the {}", "a rendition of the {}", "a photo of the clean {}", "a rendition of a {}", "a photo of a nice {}", "a good photo of a {}", "a photo of the nice {}", "a photo of the small {}", "a photo of the weird {}", "a photo of the large {}", "a photo of a cool {}", "a photo of a small {}", ] imagenet_style_templates_small = [ "a painting in the style of {}", "a rendering in the style of {}", "a cropped painting in the style of {}", "the painting in the style of {}", "a clean painting in the style of {}", "a dirty painting in the style of {}", "a dark painting in the style of {}", "a picture in the style of {}", "a cool painting in the style of {}", "a close-up painting in the style of {}", "a bright painting in the style of {}", "a cropped painting in the style of {}", "a good painting in the style of {}", "a close-up painting in the style of {}", "a rendition in the style of {}", "a nice painting in the style of {}", "a small painting in the style of {}", "a weird painting in the style of {}", "a large painting in the style of {}", ] # Adapted from torch-ema https://github.com/fadel/pytorch_ema/blob/master/torch_ema/ema.py#L14 class EMAModel: """ Exponential Moving Average of models weights """ def __init__(self, parameters: Iterable[torch.nn.Parameter], decay=0.9999): parameters = list(parameters) self.shadow_params = [p.clone().detach() for p in parameters] self.decay = decay self.optimization_step = 0 def get_decay(self, optimization_step): """ Compute the decay factor for the exponential moving average. """ value = (1 + optimization_step) / (10 + optimization_step) return 1 - min(self.decay, value) @torch.no_grad() def step(self, parameters): parameters = list(parameters) self.optimization_step += 1 self.decay = self.get_decay(self.optimization_step) for s_param, param in zip(self.shadow_params, parameters): if param.requires_grad: tmp = self.decay * (s_param - param) s_param.sub_(tmp) else: s_param.copy_(param) torch.cuda.empty_cache() def copy_to(self, parameters: Iterable[torch.nn.Parameter]) -> None: """ Copy current averaged parameters into given collection of parameters. 
Args: parameters: Iterable of `torch.nn.Parameter`; the parameters to be updated with the stored moving averages. If `None`, the parameters with which this `ExponentialMovingAverage` was initialized will be used. """ parameters = list(parameters) for s_param, param in zip(self.shadow_params, parameters): param.data.copy_(s_param.data) def to(self, device=None, dtype=None) -> None: r"""Move internal buffers of the ExponentialMovingAverage to `device`. Args: device: like `device` argument to `torch.Tensor.to` """ # .to() on the tensors handles None correctly self.shadow_params = [ p.to(device=device, dtype=dtype) if p.is_floating_point() else p.to(device=device) for p in self.shadow_params ] class TextualInversionDataset(Dataset): def __init__( self, data_root, tokenizer, learnable_property="object", # [object, style] size=512, repeats=100, interpolation="bicubic", flip_p=0.5, set="train", placeholder_token="*", center_crop=False, ): self.data_root = data_root self.tokenizer = tokenizer self.learnable_property = learnable_property self.size = size self.placeholder_token = placeholder_token self.center_crop = center_crop self.flip_p = flip_p self.image_paths = [os.path.join(self.data_root, file_path) for file_path in os.listdir(self.data_root)] self.num_images = len(self.image_paths) self._length = self.num_images if set == "train": self._length = self.num_images * repeats self.interpolation = { "linear": PIL_INTERPOLATION["linear"], "bilinear": PIL_INTERPOLATION["bilinear"], "bicubic": PIL_INTERPOLATION["bicubic"], "lanczos": PIL_INTERPOLATION["lanczos"], }[interpolation] self.templates = imagenet_style_templates_small if learnable_property == "style" else imagenet_templates_small self.flip_transform = transforms.RandomHorizontalFlip(p=self.flip_p) def __len__(self): return self._length def __getitem__(self, i): example = {} image = Image.open(self.image_paths[i % self.num_images]) if not image.mode == "RGB": image = image.convert("RGB") placeholder_string = self.placeholder_token text = random.choice(self.templates).format(placeholder_string) example["input_ids"] = self.tokenizer( text, padding="max_length", truncation=True, max_length=self.tokenizer.model_max_length, return_tensors="pt", ).input_ids[0] # default to score-sde preprocessing img = np.array(image).astype(np.uint8) if self.center_crop: crop = min(img.shape[0], img.shape[1]) ( h, w, ) = ( img.shape[0], img.shape[1], ) img = img[(h - crop) // 2 : (h + crop) // 2, (w - crop) // 2 : (w + crop) // 2] image = Image.fromarray(img) image = image.resize((self.size, self.size), resample=self.interpolation) image = self.flip_transform(image) image = np.array(image).astype(np.uint8) image = (image / 127.5 - 1.0).astype(np.float32) example["pixel_values"] = torch.from_numpy(image).permute(2, 0, 1) return example def freeze_params(params): for param in params: param.requires_grad = False def generate_images(pipeline, prompt="", guidance_scale=7.5, num_inference_steps=50, num_images_per_prompt=1, seed=42): generator = torch.Generator(pipeline.device).manual_seed(seed) images = pipeline( prompt, guidance_scale=guidance_scale, num_inference_steps=num_inference_steps, generator=generator, num_images_per_prompt=num_images_per_prompt, ).images _rows = int(math.sqrt(num_images_per_prompt)) grid = make_image_grid(images, rows=_rows, cols=num_images_per_prompt // _rows) return grid def main(): args = parse_args() logging_dir = os.path.join(args.output_dir, args.logging_dir) accelerator_project_config = 
ProjectConfiguration(project_dir=args.output_dir, logging_dir=logging_dir) accelerator = Accelerator( gradient_accumulation_steps=args.gradient_accumulation_steps, mixed_precision=args.mixed_precision, log_with="tensorboard", project_config=accelerator_project_config, ) # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed) # Handle the repository creation if accelerator.is_main_process: if args.output_dir is not None: os.makedirs(args.output_dir, exist_ok=True) if args.push_to_hub: repo_id = create_repo( repo_id=args.hub_model_id or Path(args.output_dir).name, exist_ok=True, token=args.hub_token ).repo_id # Load the tokenizer and add the placeholder token as a additional special token if args.tokenizer_name: tokenizer = CLIPTokenizer.from_pretrained(args.tokenizer_name) elif args.pretrained_model_name_or_path: tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer") # Load models and create wrapper for stable diffusion noise_scheduler = DDPMScheduler.from_config(args.pretrained_model_name_or_path, subfolder="scheduler") text_encoder = CLIPTextModel.from_pretrained( args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision, ) vae = AutoencoderKL.from_pretrained( args.pretrained_model_name_or_path, subfolder="vae", revision=args.revision, ) unet = UNet2DConditionModel.from_pretrained( args.pretrained_model_name_or_path, subfolder="unet", revision=args.revision, ) train_unet = False # Freeze vae and unet freeze_params(vae.parameters()) if not args.do_quantization and not args.do_distillation: # Add the placeholder token in tokenizer num_added_tokens = tokenizer.add_tokens(args.placeholder_token) if num_added_tokens == 0: raise ValueError( f"The tokenizer already contains the token {args.placeholder_token}. Please pass a different" " `placeholder_token` that is not already in the tokenizer." 
) # Convert the initializer_token, placeholder_token to ids token_ids = tokenizer.encode(args.initializer_token, add_special_tokens=False) # Check if initializer_token is a single token or a sequence of tokens if len(token_ids) > 1: raise ValueError("The initializer token must be a single token.") initializer_token_id = token_ids[0] placeholder_token_id = tokenizer.convert_tokens_to_ids(args.placeholder_token) # Resize the token embeddings as we are adding new special tokens to the tokenizer text_encoder.resize_token_embeddings(len(tokenizer)) # Initialise the newly added placeholder token with the embeddings of the initializer token token_embeds = text_encoder.get_input_embeddings().weight.data token_embeds[placeholder_token_id] = token_embeds[initializer_token_id] freeze_params(unet.parameters()) # Freeze all parameters except for the token embeddings in text encoder params_to_freeze = itertools.chain( text_encoder.text_model.encoder.parameters(), text_encoder.text_model.final_layer_norm.parameters(), text_encoder.text_model.embeddings.position_embedding.parameters(), ) freeze_params(params_to_freeze) else: train_unet = True freeze_params(text_encoder.parameters()) if args.scale_lr: args.learning_rate = ( args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes ) # Initialize the optimizer optimizer = torch.optim.AdamW( # only optimize the unet or embeddings of text_encoder unet.parameters() if train_unet else text_encoder.get_input_embeddings().parameters(), lr=args.learning_rate, betas=(args.adam_beta1, args.adam_beta2), weight_decay=args.adam_weight_decay, eps=args.adam_epsilon, ) train_dataset = TextualInversionDataset( data_root=args.train_data_dir, tokenizer=tokenizer, size=args.resolution, placeholder_token=args.placeholder_token, repeats=args.repeats, learnable_property=args.learnable_property, center_crop=args.center_crop, set="train", ) train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=args.train_batch_size, shuffle=True) # Scheduler and math around the number of training steps. overrode_max_train_steps = False num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if args.max_train_steps is None: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch overrode_max_train_steps = True lr_scheduler = get_scheduler( args.lr_scheduler, optimizer=optimizer, num_warmup_steps=args.lr_warmup_steps * accelerator.num_processes, num_training_steps=args.max_train_steps * accelerator.num_processes, ) if not train_unet: text_encoder = accelerator.prepare(text_encoder) unet.to(accelerator.device) unet.eval() else: unet = accelerator.prepare(unet) text_encoder.to(accelerator.device) text_encoder.eval() optimizer, train_dataloader, lr_scheduler = accelerator.prepare(optimizer, train_dataloader, lr_scheduler) # Move vae to device vae.to(accelerator.device) # Keep vae in eval model as we don't train these vae.eval() compression_manager = None def train_func(model): if train_unet: unet_ = model text_encoder_ = text_encoder else: unet_ = unet text_encoder_ = model # We need to recalculate our total training steps as the size of the training dataloader may have changed. 
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if overrode_max_train_steps: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch # Afterwards we recalculate our number of training epochs args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) # We need to initialize the trackers we use, and also store our configuration. # The trackers initializes automatically on the main process. if accelerator.is_main_process: accelerator.init_trackers("textual_inversion", config=vars(args)) # Train! total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps logger.info("***** Running training *****") logger.info(f" Num examples = {len(train_dataset)}") logger.info(f" Num Epochs = {args.num_train_epochs}") logger.info(f" Instantaneous batch size per device = {args.train_batch_size}") logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}") logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") logger.info(f" Total optimization steps = {args.max_train_steps}") # Only show the progress bar once on each machine. progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process) progress_bar.set_description("Steps") global_step = 0 if train_unet and args.use_ema: ema_unet = EMAModel(unet_.parameters()) for epoch in range(args.num_train_epochs): model.train() train_loss = 0.0 for step, batch in enumerate(train_dataloader): with accelerator.accumulate(model): # Convert images to latent space latents = vae.encode(batch["pixel_values"]).latent_dist.sample().detach() latents = latents * 0.18215 # Sample noise that we'll add to the latents noise = torch.randn(latents.shape).to(latents.device) bsz = latents.shape[0] # Sample a random timestep for each image timesteps = torch.randint( 0, noise_scheduler.config.num_train_timesteps, (bsz,), device=latents.device ).long() # Add noise to the latents according to the noise magnitude at each timestep # (this is the forward diffusion process) noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps) # Get the text embedding for conditioning encoder_hidden_states = text_encoder_(batch["input_ids"])[0] # Predict the noise residual model_pred = unet_(noisy_latents, timesteps, encoder_hidden_states).sample loss = F.mse_loss(model_pred, noise, reduction="none").mean([1, 2, 3]).mean() if train_unet and compression_manager: unet_inputs = { "sample": noisy_latents, "timestep": timesteps, "encoder_hidden_states": encoder_hidden_states, } loss = compression_manager.callbacks.on_after_compute_loss(unet_inputs, model_pred, loss) # Gather the losses across all processes for logging (if we use distributed training). 
avg_loss = accelerator.gather(loss.repeat(args.train_batch_size)).mean() train_loss += avg_loss.item() / args.gradient_accumulation_steps # Backpropagate accelerator.backward(loss) if train_unet: if accelerator.sync_gradients: accelerator.clip_grad_norm_(unet_.parameters(), args.max_grad_norm) else: # Zero out the gradients for all token embeddings except the newly added # embeddings for the concept, as we only want to optimize the concept embeddings if accelerator.num_processes > 1: grads = text_encoder_.module.get_input_embeddings().weight.grad else: grads = text_encoder_.get_input_embeddings().weight.grad # Get the index for tokens that we want to zero the grads for index_grads_to_zero = torch.arange(len(tokenizer)) != placeholder_token_id grads.data[index_grads_to_zero, :] = grads.data[index_grads_to_zero, :].fill_(0) optimizer.step() lr_scheduler.step() optimizer.zero_grad() # Checks if the accelerator has performed an optimization step behind the scenes if accelerator.sync_gradients: if train_unet and args.use_ema: ema_unet.step(unet_.parameters()) progress_bar.update(1) global_step += 1 accelerator.log({"train_loss": train_loss}, step=global_step) train_loss = 0.0 if not train_unet and global_step % args.save_steps == 0: save_path = os.path.join(args.output_dir, f"learned_embeds-steps-{global_step}.bin") save_progress(text_encoder_, placeholder_token_id, accelerator, args, save_path) logs = {"step_loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]} progress_bar.set_postfix(**logs) accelerator.log(logs, step=global_step) if global_step >= args.max_train_steps: break accelerator.wait_for_everyone() if train_unet and args.use_ema: ema_unet.copy_to(unet_.parameters()) if not train_unet: return text_encoder_ if not train_unet: text_encoder = train_func(text_encoder) else: import copy model = copy.deepcopy(unet) confs = [] if args.do_quantization: from neural_compressor import QuantizationAwareTrainingConfig q_conf = QuantizationAwareTrainingConfig() confs.append(q_conf) if args.do_distillation: teacher_model = copy.deepcopy(model) def attention_fetcher(x): return x.sample layer_mappings = [ [ [ "conv_in", ] ], [ [ "time_embedding", ] ], [["down_blocks.0.attentions.0", attention_fetcher]], [["down_blocks.0.attentions.1", attention_fetcher]], [ [ "down_blocks.0.resnets.0", ] ], [ [ "down_blocks.0.resnets.1", ] ], [ [ "down_blocks.0.downsamplers.0", ] ], [["down_blocks.1.attentions.0", attention_fetcher]], [["down_blocks.1.attentions.1", attention_fetcher]], [ [ "down_blocks.1.resnets.0", ] ], [ [ "down_blocks.1.resnets.1", ] ], [ [ "down_blocks.1.downsamplers.0", ] ], [["down_blocks.2.attentions.0", attention_fetcher]], [["down_blocks.2.attentions.1", attention_fetcher]], [ [ "down_blocks.2.resnets.0", ] ], [ [ "down_blocks.2.resnets.1", ] ], [ [ "down_blocks.2.downsamplers.0", ] ], [ [ "down_blocks.3.resnets.0", ] ], [ [ "down_blocks.3.resnets.1", ] ], [ [ "up_blocks.0.resnets.0", ] ], [ [ "up_blocks.0.resnets.1", ] ], [ [ "up_blocks.0.resnets.2", ] ], [ [ "up_blocks.0.upsamplers.0", ] ], [["up_blocks.1.attentions.0", attention_fetcher]], [["up_blocks.1.attentions.1", attention_fetcher]], [["up_blocks.1.attentions.2", attention_fetcher]], [ [ "up_blocks.1.resnets.0", ] ], [ [ "up_blocks.1.resnets.1", ] ], [ [ "up_blocks.1.resnets.2", ] ], [ [ "up_blocks.1.upsamplers.0", ] ], [["up_blocks.2.attentions.0", attention_fetcher]], [["up_blocks.2.attentions.1", attention_fetcher]], [["up_blocks.2.attentions.2", attention_fetcher]], [ [ "up_blocks.2.resnets.0", ] ], [ [ 
"up_blocks.2.resnets.1", ] ], [ [ "up_blocks.2.resnets.2", ] ], [ [ "up_blocks.2.upsamplers.0", ] ], [["up_blocks.3.attentions.0", attention_fetcher]], [["up_blocks.3.attentions.1", attention_fetcher]], [["up_blocks.3.attentions.2", attention_fetcher]], [ [ "up_blocks.3.resnets.0", ] ], [ [ "up_blocks.3.resnets.1", ] ], [ [ "up_blocks.3.resnets.2", ] ], [["mid_block.attentions.0", attention_fetcher]], [ [ "mid_block.resnets.0", ] ], [ [ "mid_block.resnets.1", ] ], [ [ "conv_out", ] ], ] layer_names = [layer_mapping[0][0] for layer_mapping in layer_mappings] if not set(layer_names).issubset([n[0] for n in model.named_modules()]): raise ValueError( "Provided model is not compatible with the default layer_mappings, " 'please use the model fine-tuned from "CompVis/stable-diffusion-v1-4", ' "or modify the layer_mappings variable to fit your model." f"\nDefault layer_mappings are as such:\n{layer_mappings}" ) from neural_compressor.config import DistillationConfig, IntermediateLayersKnowledgeDistillationLossConfig distillation_criterion = IntermediateLayersKnowledgeDistillationLossConfig( layer_mappings=layer_mappings, loss_types=["MSE"] * len(layer_mappings), loss_weights=[1.0 / len(layer_mappings)] * len(layer_mappings), add_origin_loss=True, ) d_conf = DistillationConfig(teacher_model=teacher_model, criterion=distillation_criterion) confs.append(d_conf) from neural_compressor.training import prepare_compression compression_manager = prepare_compression(model, confs) compression_manager.callbacks.on_train_begin() model = compression_manager.model train_func(model) compression_manager.callbacks.on_train_end() # Save the resulting model and its corresponding configuration in the given directory model.save(args.output_dir) logger.info(f"Optimized model saved to: {args.output_dir}.") # change to framework model for further use model = model.model # Create the pipeline using using the trained modules and save it. 
templates = imagenet_style_templates_small if args.learnable_property == "style" else imagenet_templates_small prompt = templates[0].format(args.placeholder_token) if accelerator.is_main_process: pipeline = StableDiffusionPipeline.from_pretrained( args.pretrained_model_name_or_path, text_encoder=accelerator.unwrap_model(text_encoder), vae=vae, unet=accelerator.unwrap_model(unet), tokenizer=tokenizer, ) pipeline.save_pretrained(args.output_dir) pipeline = pipeline.to(unet.device) baseline_model_images = generate_images(pipeline, prompt=prompt, seed=args.seed) baseline_model_images.save( os.path.join(args.output_dir, "{}_baseline_model.png".format("_".join(prompt.split()))) ) if not train_unet: # Also save the newly trained embeddings save_path = os.path.join(args.output_dir, "learned_embeds.bin") save_progress(text_encoder, placeholder_token_id, accelerator, args, save_path) else: setattr(pipeline, "unet", accelerator.unwrap_model(model)) if args.do_quantization: pipeline = pipeline.to(torch.device("cpu")) optimized_model_images = generate_images(pipeline, prompt=prompt, seed=args.seed) optimized_model_images.save( os.path.join(args.output_dir, "{}_optimized_model.png".format("_".join(prompt.split()))) ) if args.push_to_hub: upload_folder( repo_id=repo_id, folder_path=args.output_dir, commit_message="End of training", ignore_patterns=["step_*", "epoch_*"], ) accelerator.end_training() if args.do_quantization and args.verify_loading: # Load the model obtained after Intel Neural Compressor quantization from neural_compressor.utils.pytorch import load loaded_model = load(args.output_dir, model=unet) loaded_model.eval() setattr(pipeline, "unet", loaded_model) if args.do_quantization: pipeline = pipeline.to(torch.device("cpu")) loaded_model_images = generate_images(pipeline, prompt=prompt, seed=args.seed) if loaded_model_images != optimized_model_images: logger.info("The quantized model was not successfully loaded.") else: logger.info("The quantized model was successfully loaded.") if __name__ == "__main__": main()
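# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original script): minimal standalone usage of
# the EMAModel helper defined above, shown on a toy module. In the script itself
# the EMA shadows the UNet parameters during training; the linear layer here is
# only an illustration.
# ---------------------------------------------------------------------------
def _ema_usage_sketch():
    import torch.nn as nn

    toy_model = nn.Linear(4, 4)
    ema = EMAModel(toy_model.parameters(), decay=0.9999)
    optimizer = torch.optim.AdamW(toy_model.parameters(), lr=1e-3)

    for _ in range(10):
        loss = toy_model(torch.randn(8, 4)).pow(2).mean()
        loss.backward()
        optimizer.step()
        optimizer.zero_grad()
        # Update the shadow (averaged) weights after every optimizer step.
        ema.step(toy_model.parameters())

    # Copy the averaged weights back into the model before evaluation or saving.
    ema.copy_to(toy_model.parameters())
    return toy_model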
diffusers/examples/research_projects/intel_opts/textual_inversion_dfq/textual_inversion.py/0
{ "file_path": "diffusers/examples/research_projects/intel_opts/textual_inversion_dfq/textual_inversion.py", "repo_id": "diffusers", "token_count": 18301 }
148
## [Deprecated] Multi Token Textual Inversion

**IMPORTANT: This research project is deprecated. Multi Token Textual Inversion is now supported natively in [the official textual inversion example](https://github.com/huggingface/diffusers/tree/main/examples/textual_inversion#running-locally-with-pytorch).**

The author of this project is [Isamu Isozaki](https://github.com/isamu-isozaki) - please make sure to tag the author for issues and PRs as well as @patrickvonplaten.

We add multi token support to textual inversion. I added

1. `num_vec_per_token` for the number of vectors used to represent the placeholder token
2. `progressive_tokens` for progressively training the token from 1 token to 2 tokens, etc.
3. `progressive_tokens_max_steps` for the max number of steps until we start full training
4. `vector_shuffle` to shuffle vectors

Feel free to add these options to your training! In practice, `num_vec_per_token` around 10 combined with vector shuffle works great!

## Textual Inversion fine-tuning example

[Textual inversion](https://huggingface.co/papers/2208.01618) is a method to personalize text2image models like stable diffusion on your own images using just 3-5 examples. The `textual_inversion.py` script shows how to implement the training procedure and adapt it for stable diffusion.

## Running on Colab

Colab for training
[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_textual_inversion_training.ipynb)

Colab for inference
[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/stable_conceptualizer_inference.ipynb)

## Running locally with PyTorch

### Installing the dependencies

Before running the scripts, make sure to install the library's training dependencies:

**Important**

To make sure you can successfully run the latest versions of the example scripts, we highly recommend **installing from source** and keeping the install up to date as we update the example scripts frequently and install some example-specific requirements. To do this, execute the following steps in a new virtual environment:

```bash
git clone https://github.com/huggingface/diffusers
cd diffusers
pip install .
```

Then cd into the example folder and run

```bash
pip install -r requirements.txt
```

And initialize an [🤗Accelerate](https://github.com/huggingface/accelerate/) environment with:

```bash
accelerate config
```

### Cat toy example

You need to accept the model license before downloading or using the weights. In this example we'll use model version `v1-5`, so you'll need to visit [its card](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5), read the license and tick the checkbox if you agree.

You have to be a registered user in 🤗 Hugging Face Hub, and you'll also need to use an access token for the code to work. For more information on access tokens, please refer to [this section of the documentation](https://huggingface.co/docs/hub/security-tokens).

Run the following command to authenticate your token

```bash
hf auth login
```

If you have already cloned the repo, then you won't need to go through these steps.

<br>

Now let's get our dataset. Download 3-4 images from [here](https://drive.google.com/drive/folders/1fmJMs25nxS_rSNqS5hTcRdLem_YQXbq5) and save them in a directory. This will be our training data.
And launch the training using

**___Note: Change the `resolution` to 768 if you are using the [stable-diffusion-2](https://huggingface.co/stabilityai/stable-diffusion-2) 768x768 model.___**

```bash
export MODEL_NAME="stable-diffusion-v1-5/stable-diffusion-v1-5"
export DATA_DIR="path-to-dir-containing-images"

accelerate launch textual_inversion.py \
  --pretrained_model_name_or_path=$MODEL_NAME \
  --train_data_dir=$DATA_DIR \
  --learnable_property="object" \
  --placeholder_token="<cat-toy>" --initializer_token="toy" \
  --resolution=512 \
  --train_batch_size=1 \
  --gradient_accumulation_steps=4 \
  --max_train_steps=3000 \
  --learning_rate=5.0e-04 --scale_lr \
  --lr_scheduler="constant" \
  --lr_warmup_steps=0 \
  --output_dir="textual_inversion_cat"
```

A full training run takes ~1 hour on one V100 GPU.

### Inference

Once you have trained a model using the above command, inference can be done simply using the `StableDiffusionPipeline`. Make sure to include the `placeholder_token` in your prompt.

```python
import torch
from diffusers import StableDiffusionPipeline

model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")

prompt = "A <cat-toy> backpack"

image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]

image.save("cat-backpack.png")
```

## Training with Flax/JAX

For faster training on TPUs and GPUs you can leverage the flax training example. Follow the instructions above to get the model and dataset before running the script.

Before running the scripts, make sure to install the library's training dependencies:

```bash
pip install -U -r requirements_flax.txt
```

```bash
export MODEL_NAME="duongna/stable-diffusion-v1-4-flax"
export DATA_DIR="path-to-dir-containing-images"

python textual_inversion_flax.py \
  --pretrained_model_name_or_path=$MODEL_NAME \
  --train_data_dir=$DATA_DIR \
  --learnable_property="object" \
  --placeholder_token="<cat-toy>" --initializer_token="toy" \
  --resolution=512 \
  --train_batch_size=1 \
  --max_train_steps=3000 \
  --learning_rate=5.0e-04 --scale_lr \
  --output_dir="textual_inversion_cat"
```

It should be at least 70% faster than the PyTorch script with the same configuration.

### Training with xformers:

You can enable memory efficient attention by [installing xFormers](https://github.com/facebookresearch/xformers#installing-xformers) and passing the `--enable_xformers_memory_efficient_attention` argument to the script. This is not available with the Flax/JAX implementation.
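Finally, since this project is deprecated in favor of the official textual inversion example, note that embeddings produced by that official script can also be loaded into an existing pipeline with `load_textual_inversion`. Below is a minimal sketch; the paths are placeholders, and embeddings saved by this deprecated multi-token variant may require custom loading code instead.

```python
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")

# "path-to-your-trained-model" is a placeholder directory containing learned_embeds.bin
pipe.load_textual_inversion("path-to-your-trained-model", weight_name="learned_embeds.bin")
image = pipe("A <cat-toy> backpack", num_inference_steps=50).images[0]
```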
diffusers/examples/research_projects/multi_token_textual_inversion/README.md/0
{ "file_path": "diffusers/examples/research_projects/multi_token_textual_inversion/README.md", "repo_id": "diffusers", "token_count": 1852 }
149
## Diffusers examples with Retrieval Augmented Diffusion Models (RDM)

**This research project is not actively maintained by the diffusers team. For any questions or comments, please contact Isamu Isozaki ([isamu-isozaki](https://github.com/isamu-isozaki)) on GitHub.**

The aim of this project is to provide retrieval augmented diffusion models to diffusers!
diffusers/examples/research_projects/rdm/README.md/0
{ "file_path": "diffusers/examples/research_projects/rdm/README.md", "repo_id": "diffusers", "token_count": 75 }
150
#!/usr/bin/env python # coding=utf-8 # Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import os import shutil import sys import tempfile from diffusers import DiffusionPipeline, UNet2DConditionModel # noqa: E402 sys.path.append("..") from test_examples_utils import ExamplesTestsAccelerate, run_command # noqa: E402 logging.basicConfig(level=logging.DEBUG) logger = logging.getLogger() stream_handler = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class TextToImage(ExamplesTestsAccelerate): def test_text_to_image(self): with tempfile.TemporaryDirectory() as tmpdir: test_args = f""" examples/text_to_image/train_text_to_image.py --pretrained_model_name_or_path hf-internal-testing/tiny-stable-diffusion-pipe --dataset_name hf-internal-testing/dummy_image_text_data --resolution 64 --center_crop --random_flip --train_batch_size 1 --gradient_accumulation_steps 1 --max_train_steps 2 --learning_rate 5.0e-04 --scale_lr --lr_scheduler constant --lr_warmup_steps 0 --output_dir {tmpdir} """.split() run_command(self._launch_args + test_args) # save_pretrained smoke test self.assertTrue(os.path.isfile(os.path.join(tmpdir, "unet", "diffusion_pytorch_model.safetensors"))) self.assertTrue(os.path.isfile(os.path.join(tmpdir, "scheduler", "scheduler_config.json"))) def test_text_to_image_checkpointing(self): pretrained_model_name_or_path = "hf-internal-testing/tiny-stable-diffusion-pipe" prompt = "a prompt" with tempfile.TemporaryDirectory() as tmpdir: # Run training script with checkpointing # max_train_steps == 4, checkpointing_steps == 2 # Should create checkpoints at steps 2, 4 initial_run_args = f""" examples/text_to_image/train_text_to_image.py --pretrained_model_name_or_path {pretrained_model_name_or_path} --dataset_name hf-internal-testing/dummy_image_text_data --resolution 64 --center_crop --random_flip --train_batch_size 1 --gradient_accumulation_steps 1 --max_train_steps 4 --learning_rate 5.0e-04 --scale_lr --lr_scheduler constant --lr_warmup_steps 0 --output_dir {tmpdir} --checkpointing_steps=2 --seed=0 """.split() run_command(self._launch_args + initial_run_args) pipe = DiffusionPipeline.from_pretrained(tmpdir, safety_checker=None) pipe(prompt, num_inference_steps=1) # check checkpoint directories exist self.assertEqual( {x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-2", "checkpoint-4"}, ) # check can run an intermediate checkpoint unet = UNet2DConditionModel.from_pretrained(tmpdir, subfolder="checkpoint-2/unet") pipe = DiffusionPipeline.from_pretrained(pretrained_model_name_or_path, unet=unet, safety_checker=None) pipe(prompt, num_inference_steps=1) # Remove checkpoint 2 so that we can check only later checkpoints exist after resuming shutil.rmtree(os.path.join(tmpdir, "checkpoint-2")) # Run training script for 2 total steps resuming from checkpoint 4 resume_run_args = f""" examples/text_to_image/train_text_to_image.py --pretrained_model_name_or_path {pretrained_model_name_or_path} 
--dataset_name hf-internal-testing/dummy_image_text_data --resolution 64 --center_crop --random_flip --train_batch_size 1 --gradient_accumulation_steps 1 --max_train_steps 2 --learning_rate 5.0e-04 --scale_lr --lr_scheduler constant --lr_warmup_steps 0 --output_dir {tmpdir} --checkpointing_steps=1 --resume_from_checkpoint=checkpoint-4 --seed=0 """.split() run_command(self._launch_args + resume_run_args) # check can run new fully trained pipeline pipe = DiffusionPipeline.from_pretrained(tmpdir, safety_checker=None) pipe(prompt, num_inference_steps=1) # no checkpoint-2 -> check old checkpoints do not exist # check new checkpoints exist self.assertEqual( {x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-4", "checkpoint-5"}, ) def test_text_to_image_checkpointing_use_ema(self): pretrained_model_name_or_path = "hf-internal-testing/tiny-stable-diffusion-pipe" prompt = "a prompt" with tempfile.TemporaryDirectory() as tmpdir: # Run training script with checkpointing # max_train_steps == 4, checkpointing_steps == 2 # Should create checkpoints at steps 2, 4 initial_run_args = f""" examples/text_to_image/train_text_to_image.py --pretrained_model_name_or_path {pretrained_model_name_or_path} --dataset_name hf-internal-testing/dummy_image_text_data --resolution 64 --center_crop --random_flip --train_batch_size 1 --gradient_accumulation_steps 1 --max_train_steps 4 --learning_rate 5.0e-04 --scale_lr --lr_scheduler constant --lr_warmup_steps 0 --output_dir {tmpdir} --checkpointing_steps=2 --use_ema --seed=0 """.split() run_command(self._launch_args + initial_run_args) pipe = DiffusionPipeline.from_pretrained(tmpdir, safety_checker=None) pipe(prompt, num_inference_steps=2) # check checkpoint directories exist self.assertEqual( {x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-2", "checkpoint-4"}, ) # check can run an intermediate checkpoint unet = UNet2DConditionModel.from_pretrained(tmpdir, subfolder="checkpoint-2/unet") pipe = DiffusionPipeline.from_pretrained(pretrained_model_name_or_path, unet=unet, safety_checker=None) pipe(prompt, num_inference_steps=1) # Remove checkpoint 2 so that we can check only later checkpoints exist after resuming shutil.rmtree(os.path.join(tmpdir, "checkpoint-2")) # Run training script for 2 total steps resuming from checkpoint 4 resume_run_args = f""" examples/text_to_image/train_text_to_image.py --pretrained_model_name_or_path {pretrained_model_name_or_path} --dataset_name hf-internal-testing/dummy_image_text_data --resolution 64 --center_crop --random_flip --train_batch_size 1 --gradient_accumulation_steps 1 --max_train_steps 2 --learning_rate 5.0e-04 --scale_lr --lr_scheduler constant --lr_warmup_steps 0 --output_dir {tmpdir} --checkpointing_steps=1 --resume_from_checkpoint=checkpoint-4 --use_ema --seed=0 """.split() run_command(self._launch_args + resume_run_args) # check can run new fully trained pipeline pipe = DiffusionPipeline.from_pretrained(tmpdir, safety_checker=None) pipe(prompt, num_inference_steps=1) # no checkpoint-2 -> check old checkpoints do not exist # check new checkpoints exist self.assertEqual( {x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-4", "checkpoint-5"}, ) def test_text_to_image_checkpointing_checkpoints_total_limit(self): pretrained_model_name_or_path = "hf-internal-testing/tiny-stable-diffusion-pipe" prompt = "a prompt" with tempfile.TemporaryDirectory() as tmpdir: # Run training script with checkpointing # max_train_steps == 6, checkpointing_steps == 2, checkpoints_total_limit == 2 # Should 
create checkpoints at steps 2, 4, 6 # with checkpoint at step 2 deleted initial_run_args = f""" examples/text_to_image/train_text_to_image.py --pretrained_model_name_or_path {pretrained_model_name_or_path} --dataset_name hf-internal-testing/dummy_image_text_data --resolution 64 --center_crop --random_flip --train_batch_size 1 --gradient_accumulation_steps 1 --max_train_steps 6 --learning_rate 5.0e-04 --scale_lr --lr_scheduler constant --lr_warmup_steps 0 --output_dir {tmpdir} --checkpointing_steps=2 --checkpoints_total_limit=2 --seed=0 """.split() run_command(self._launch_args + initial_run_args) pipe = DiffusionPipeline.from_pretrained(tmpdir, safety_checker=None) pipe(prompt, num_inference_steps=1) # check checkpoint directories exist # checkpoint-2 should have been deleted self.assertEqual({x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-4", "checkpoint-6"}) def test_text_to_image_checkpointing_checkpoints_total_limit_removes_multiple_checkpoints(self): pretrained_model_name_or_path = "hf-internal-testing/tiny-stable-diffusion-pipe" prompt = "a prompt" with tempfile.TemporaryDirectory() as tmpdir: # Run training script with checkpointing # max_train_steps == 4, checkpointing_steps == 2 # Should create checkpoints at steps 2, 4 initial_run_args = f""" examples/text_to_image/train_text_to_image.py --pretrained_model_name_or_path {pretrained_model_name_or_path} --dataset_name hf-internal-testing/dummy_image_text_data --resolution 64 --center_crop --random_flip --train_batch_size 1 --gradient_accumulation_steps 1 --max_train_steps 4 --learning_rate 5.0e-04 --scale_lr --lr_scheduler constant --lr_warmup_steps 0 --output_dir {tmpdir} --checkpointing_steps=2 --seed=0 """.split() run_command(self._launch_args + initial_run_args) pipe = DiffusionPipeline.from_pretrained(tmpdir, safety_checker=None) pipe(prompt, num_inference_steps=1) # check checkpoint directories exist self.assertEqual( {x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-2", "checkpoint-4"}, ) # resume and we should try to checkpoint at 6, where we'll have to remove # checkpoint-2 and checkpoint-4 instead of just a single previous checkpoint resume_run_args = f""" examples/text_to_image/train_text_to_image.py --pretrained_model_name_or_path {pretrained_model_name_or_path} --dataset_name hf-internal-testing/dummy_image_text_data --resolution 64 --center_crop --random_flip --train_batch_size 1 --gradient_accumulation_steps 1 --max_train_steps 8 --learning_rate 5.0e-04 --scale_lr --lr_scheduler constant --lr_warmup_steps 0 --output_dir {tmpdir} --checkpointing_steps=2 --resume_from_checkpoint=checkpoint-4 --checkpoints_total_limit=2 --seed=0 """.split() run_command(self._launch_args + resume_run_args) pipe = DiffusionPipeline.from_pretrained(tmpdir, safety_checker=None) pipe(prompt, num_inference_steps=1) # check checkpoint directories exist self.assertEqual( {x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-6", "checkpoint-8"}, ) class TextToImageSDXL(ExamplesTestsAccelerate): def test_text_to_image_sdxl(self): with tempfile.TemporaryDirectory() as tmpdir: test_args = f""" examples/text_to_image/train_text_to_image_sdxl.py --pretrained_model_name_or_path hf-internal-testing/tiny-stable-diffusion-xl-pipe --dataset_name hf-internal-testing/dummy_image_text_data --resolution 64 --center_crop --random_flip --train_batch_size 1 --gradient_accumulation_steps 1 --max_train_steps 2 --learning_rate 5.0e-04 --scale_lr --lr_scheduler constant --lr_warmup_steps 0 --output_dir {tmpdir} 
""".split() run_command(self._launch_args + test_args) # save_pretrained smoke test self.assertTrue(os.path.isfile(os.path.join(tmpdir, "unet", "diffusion_pytorch_model.safetensors"))) self.assertTrue(os.path.isfile(os.path.join(tmpdir, "scheduler", "scheduler_config.json")))
diffusers/examples/text_to_image/test_text_to_image.py/0
{ "file_path": "diffusers/examples/text_to_image/test_text_to_image.py", "repo_id": "diffusers", "token_count": 7427 }
151
## Training an unconditional diffusion model Creating a training image set is [described in a different document](https://huggingface.co/docs/datasets/image_process#image-datasets). ### Installing the dependencies Before running the scripts, make sure to install the library's training dependencies: **Important** To make sure you can successfully run the latest versions of the example scripts, we highly recommend **installing from source** and keeping the install up to date as we update the example scripts frequently and install some example-specific requirements. To do this, execute the following steps in a new virtual environment: ```bash git clone https://github.com/huggingface/diffusers cd diffusers pip install . ``` Then cd in the example folder and run ```bash pip install -r requirements.txt ``` And initialize an [🤗Accelerate](https://github.com/huggingface/accelerate/) environment with: ```bash accelerate config ``` ### Unconditional Flowers The command to train a DDPM UNet model on the Oxford Flowers dataset: ```bash accelerate launch train_unconditional.py \ --dataset_name="huggan/flowers-102-categories" \ --resolution=64 --center_crop --random_flip \ --output_dir="ddpm-ema-flowers-64" \ --train_batch_size=16 \ --num_epochs=100 \ --gradient_accumulation_steps=1 \ --use_ema \ --learning_rate=1e-4 \ --lr_warmup_steps=500 \ --mixed_precision=no \ --push_to_hub ``` An example trained model: https://huggingface.co/anton-l/ddpm-ema-flowers-64 A full training run takes 2 hours on 4xV100 GPUs. <img src="https://user-images.githubusercontent.com/26864830/180248660-a0b143d0-b89a-42c5-8656-2ebf6ece7e52.png" width="700" /> ### Unconditional Pokemon The command to train a DDPM UNet model on the Pokemon dataset: ```bash accelerate launch train_unconditional.py \ --dataset_name="huggan/pokemon" \ --resolution=64 --center_crop --random_flip \ --output_dir="ddpm-ema-pokemon-64" \ --train_batch_size=16 \ --num_epochs=100 \ --gradient_accumulation_steps=1 \ --use_ema \ --learning_rate=1e-4 \ --lr_warmup_steps=500 \ --mixed_precision=no \ --push_to_hub ``` An example trained model: https://huggingface.co/anton-l/ddpm-ema-pokemon-64 A full training run takes 2 hours on 4xV100 GPUs. <img src="https://user-images.githubusercontent.com/26864830/180248200-928953b4-db38-48db-b0c6-8b740fe6786f.png" width="700" /> ### Training with multiple GPUs `accelerate` allows for seamless multi-GPU training. Follow the instructions [here](https://huggingface.co/docs/accelerate/basic_tutorials/launch) for running distributed training with `accelerate`. Here is an example command: ```bash accelerate launch --mixed_precision="fp16" --multi_gpu train_unconditional.py \ --dataset_name="huggan/pokemon" \ --resolution=64 --center_crop --random_flip \ --output_dir="ddpm-ema-pokemon-64" \ --train_batch_size=16 \ --num_epochs=100 \ --gradient_accumulation_steps=1 \ --use_ema \ --learning_rate=1e-4 \ --lr_warmup_steps=500 \ --mixed_precision="fp16" \ --logger="wandb" ``` To be able to use Weights and Biases (`wandb`) as a logger you need to install the library: `pip install wandb`. ### Using your own data To use your own dataset, there are 2 ways: - you can either provide your own folder as `--train_data_dir` - or you can upload your dataset to the hub (possibly as a private repo, if you prefer so), and simply pass the `--dataset_name` argument. Below, we explain both in more detail. 
#### Provide the dataset as a folder If you provide your own folders with images, the script expects the following directory structure: ```bash data_dir/xxx.png data_dir/xxy.png data_dir/[...]/xxz.png ``` In other words, the script will take care of gathering all images inside the folder. You can then run the script like this: ```bash accelerate launch train_unconditional.py \ --train_data_dir <path-to-train-directory> \ <other-arguments> ``` Internally, the script will use the [`ImageFolder`](https://huggingface.co/docs/datasets/v2.0.0/en/image_process#imagefolder) feature which will automatically turn the folders into 🤗 Dataset objects. #### Upload your data to the hub, as a (possibly private) repo It's very easy (and convenient) to upload your image dataset to the hub using the [`ImageFolder`](https://huggingface.co/docs/datasets/v2.0.0/en/image_process#imagefolder) feature available in 🤗 Datasets. Simply do the following: ```python from datasets import load_dataset # example 1: local folder dataset = load_dataset("imagefolder", data_dir="path_to_your_folder") # example 2: local files (supported formats are tar, gzip, zip, xz, rar, zstd) dataset = load_dataset("imagefolder", data_files="path_to_zip_file") # example 3: remote files (supported formats are tar, gzip, zip, xz, rar, zstd) dataset = load_dataset("imagefolder", data_files="https://download.microsoft.com/download/3/E/1/3E1C3F21-ECDB-4869-8368-6DEBA77B919F/kagglecatsanddogs_3367a.zip") # example 4: providing several splits dataset = load_dataset("imagefolder", data_files={"train": ["path/to/file1", "path/to/file2"], "test": ["path/to/file3", "path/to/file4"]}) ``` `ImageFolder` will create an `image` column containing the PIL-encoded images. Next, push it to the hub! ```python # assuming you have ran the hf auth login command in a terminal dataset.push_to_hub("name_of_your_dataset") # if you want to push to a private repo, simply pass private=True: dataset.push_to_hub("name_of_your_dataset", private=True) ``` and that's it! You can now train your model by simply setting the `--dataset_name` argument to the name of your dataset on the hub. More on this can also be found in [this blog post](https://huggingface.co/blog/image-search-datasets).
diffusers/examples/unconditional_image_generation/README.md/0
{ "file_path": "diffusers/examples/unconditional_image_generation/README.md", "repo_id": "diffusers", "token_count": 1938 }
152
import argparse from typing import Dict import torch import torch.nn as nn from diffusers import SparseControlNetModel KEYS_RENAME_MAPPING = { ".attention_blocks.0": ".attn1", ".attention_blocks.1": ".attn2", ".attn1.pos_encoder": ".pos_embed", ".ff_norm": ".norm3", ".norms.0": ".norm1", ".norms.1": ".norm2", ".temporal_transformer": "", } def convert(original_state_dict: Dict[str, nn.Module]) -> Dict[str, nn.Module]: converted_state_dict = {} for key in list(original_state_dict.keys()): renamed_key = key for new_name, old_name in KEYS_RENAME_MAPPING.items(): renamed_key = renamed_key.replace(new_name, old_name) converted_state_dict[renamed_key] = original_state_dict.pop(key) return converted_state_dict def get_args(): parser = argparse.ArgumentParser() parser.add_argument("--ckpt_path", type=str, required=True, help="Path to checkpoint") parser.add_argument("--output_path", type=str, required=True, help="Path to output directory") parser.add_argument( "--max_motion_seq_length", type=int, default=32, help="Max motion sequence length supported by the motion adapter", ) parser.add_argument( "--conditioning_channels", type=int, default=4, help="Number of channels in conditioning input to controlnet" ) parser.add_argument( "--use_simplified_condition_embedding", action="store_true", default=False, help="Whether or not to use simplified condition embedding. When `conditioning_channels==4` i.e. latent inputs, set this to `True`. When `conditioning_channels==3` i.e. image inputs, set this to `False`", ) parser.add_argument( "--save_fp16", action="store_true", default=False, help="Whether or not to save model in fp16 precision along with fp32", ) parser.add_argument( "--push_to_hub", action="store_true", default=False, help="Whether or not to push saved model to the HF hub" ) return parser.parse_args() if __name__ == "__main__": args = get_args() state_dict = torch.load(args.ckpt_path, map_location="cpu") if "state_dict" in state_dict.keys(): state_dict: dict = state_dict["state_dict"] controlnet = SparseControlNetModel( conditioning_channels=args.conditioning_channels, motion_max_seq_length=args.max_motion_seq_length, use_simplified_condition_embedding=args.use_simplified_condition_embedding, ) state_dict = convert(state_dict) controlnet.load_state_dict(state_dict, strict=True) controlnet.save_pretrained(args.output_path, push_to_hub=args.push_to_hub) if args.save_fp16: controlnet = controlnet.to(dtype=torch.float16) controlnet.save_pretrained(args.output_path, variant="fp16", push_to_hub=args.push_to_hub)
diffusers/scripts/convert_animatediff_sparsectrl_to_diffusers.py/0
{ "file_path": "diffusers/scripts/convert_animatediff_sparsectrl_to_diffusers.py", "repo_id": "diffusers", "token_count": 1144 }
153
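A minimal sketch of loading the checkpoint produced by the conversion script above; the output directory is a placeholder for the `--output_path` argument and the fp16 variant is only available if the script was run with `--save_fp16`:

```python
import torch

from diffusers import SparseControlNetModel

# "./converted-sparsectrl" stands in for the --output_path passed to the script (assumption).
# variant="fp16" only exists if the script was run with --save_fp16.
controlnet = SparseControlNetModel.from_pretrained(
    "./converted-sparsectrl", variant="fp16", torch_dtype=torch.float16
)
print(f"parameters: {sum(p.numel() for p in controlnet.parameters()):,}")
```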
import argparse from pathlib import Path from typing import Any, Dict import torch from accelerate import init_empty_weights from safetensors.torch import load_file from transformers import T5EncoderModel, T5Tokenizer from diffusers import ( AutoencoderKLLTXVideo, FlowMatchEulerDiscreteScheduler, LTXConditionPipeline, LTXLatentUpsamplePipeline, LTXPipeline, LTXVideoTransformer3DModel, ) from diffusers.pipelines.ltx.modeling_latent_upsampler import LTXLatentUpsamplerModel def remove_keys_(key: str, state_dict: Dict[str, Any]): state_dict.pop(key) TOKENIZER_MAX_LENGTH = 128 TRANSFORMER_KEYS_RENAME_DICT = { "patchify_proj": "proj_in", "adaln_single": "time_embed", "q_norm": "norm_q", "k_norm": "norm_k", } TRANSFORMER_SPECIAL_KEYS_REMAP = { "vae": remove_keys_, } VAE_KEYS_RENAME_DICT = { # decoder "up_blocks.0": "mid_block", "up_blocks.1": "up_blocks.0", "up_blocks.2": "up_blocks.1.upsamplers.0", "up_blocks.3": "up_blocks.1", "up_blocks.4": "up_blocks.2.conv_in", "up_blocks.5": "up_blocks.2.upsamplers.0", "up_blocks.6": "up_blocks.2", "up_blocks.7": "up_blocks.3.conv_in", "up_blocks.8": "up_blocks.3.upsamplers.0", "up_blocks.9": "up_blocks.3", # encoder "down_blocks.0": "down_blocks.0", "down_blocks.1": "down_blocks.0.downsamplers.0", "down_blocks.2": "down_blocks.0.conv_out", "down_blocks.3": "down_blocks.1", "down_blocks.4": "down_blocks.1.downsamplers.0", "down_blocks.5": "down_blocks.1.conv_out", "down_blocks.6": "down_blocks.2", "down_blocks.7": "down_blocks.2.downsamplers.0", "down_blocks.8": "down_blocks.3", "down_blocks.9": "mid_block", # common "conv_shortcut": "conv_shortcut.conv", "res_blocks": "resnets", "norm3.norm": "norm3", "per_channel_statistics.mean-of-means": "latents_mean", "per_channel_statistics.std-of-means": "latents_std", } VAE_091_RENAME_DICT = { # decoder "up_blocks.0": "mid_block", "up_blocks.1": "up_blocks.0.upsamplers.0", "up_blocks.2": "up_blocks.0", "up_blocks.3": "up_blocks.1.upsamplers.0", "up_blocks.4": "up_blocks.1", "up_blocks.5": "up_blocks.2.upsamplers.0", "up_blocks.6": "up_blocks.2", "up_blocks.7": "up_blocks.3.upsamplers.0", "up_blocks.8": "up_blocks.3", # common "last_time_embedder": "time_embedder", "last_scale_shift_table": "scale_shift_table", } VAE_095_RENAME_DICT = { # decoder "up_blocks.0": "mid_block", "up_blocks.1": "up_blocks.0.upsamplers.0", "up_blocks.2": "up_blocks.0", "up_blocks.3": "up_blocks.1.upsamplers.0", "up_blocks.4": "up_blocks.1", "up_blocks.5": "up_blocks.2.upsamplers.0", "up_blocks.6": "up_blocks.2", "up_blocks.7": "up_blocks.3.upsamplers.0", "up_blocks.8": "up_blocks.3", # encoder "down_blocks.0": "down_blocks.0", "down_blocks.1": "down_blocks.0.downsamplers.0", "down_blocks.2": "down_blocks.1", "down_blocks.3": "down_blocks.1.downsamplers.0", "down_blocks.4": "down_blocks.2", "down_blocks.5": "down_blocks.2.downsamplers.0", "down_blocks.6": "down_blocks.3", "down_blocks.7": "down_blocks.3.downsamplers.0", "down_blocks.8": "mid_block", # common "last_time_embedder": "time_embedder", "last_scale_shift_table": "scale_shift_table", } VAE_SPECIAL_KEYS_REMAP = { "per_channel_statistics.channel": remove_keys_, "per_channel_statistics.mean-of-means": remove_keys_, "per_channel_statistics.mean-of-stds": remove_keys_, "model.diffusion_model": remove_keys_, } def get_state_dict(saved_dict: Dict[str, Any]) -> Dict[str, Any]: state_dict = saved_dict if "model" in saved_dict.keys(): state_dict = state_dict["model"] if "module" in saved_dict.keys(): state_dict = state_dict["module"] if "state_dict" in saved_dict.keys(): state_dict = 
state_dict["state_dict"] return state_dict def update_state_dict_inplace(state_dict: Dict[str, Any], old_key: str, new_key: str) -> Dict[str, Any]: state_dict[new_key] = state_dict.pop(old_key) def convert_transformer(ckpt_path: str, config, dtype: torch.dtype): PREFIX_KEY = "model.diffusion_model." original_state_dict = get_state_dict(load_file(ckpt_path)) with init_empty_weights(): transformer = LTXVideoTransformer3DModel(**config) for key in list(original_state_dict.keys()): new_key = key[:] if new_key.startswith(PREFIX_KEY): new_key = key[len(PREFIX_KEY) :] for replace_key, rename_key in TRANSFORMER_KEYS_RENAME_DICT.items(): new_key = new_key.replace(replace_key, rename_key) update_state_dict_inplace(original_state_dict, key, new_key) for key in list(original_state_dict.keys()): for special_key, handler_fn_inplace in TRANSFORMER_SPECIAL_KEYS_REMAP.items(): if special_key not in key: continue handler_fn_inplace(key, original_state_dict) transformer.load_state_dict(original_state_dict, strict=True, assign=True) return transformer def convert_vae(ckpt_path: str, config, dtype: torch.dtype): PREFIX_KEY = "vae." original_state_dict = get_state_dict(load_file(ckpt_path)) with init_empty_weights(): vae = AutoencoderKLLTXVideo(**config) for key in list(original_state_dict.keys()): new_key = key[:] if new_key.startswith(PREFIX_KEY): new_key = key[len(PREFIX_KEY) :] for replace_key, rename_key in VAE_KEYS_RENAME_DICT.items(): new_key = new_key.replace(replace_key, rename_key) update_state_dict_inplace(original_state_dict, key, new_key) for key in list(original_state_dict.keys()): for special_key, handler_fn_inplace in VAE_SPECIAL_KEYS_REMAP.items(): if special_key not in key: continue handler_fn_inplace(key, original_state_dict) vae.load_state_dict(original_state_dict, strict=True, assign=True) return vae def convert_spatial_latent_upsampler(ckpt_path: str, config, dtype: torch.dtype): original_state_dict = get_state_dict(load_file(ckpt_path)) with init_empty_weights(): latent_upsampler = LTXLatentUpsamplerModel(**config) latent_upsampler.load_state_dict(original_state_dict, strict=True, assign=True) latent_upsampler.to(dtype) return latent_upsampler def get_transformer_config(version: str) -> Dict[str, Any]: if version == "0.9.7": config = { "in_channels": 128, "out_channels": 128, "patch_size": 1, "patch_size_t": 1, "num_attention_heads": 32, "attention_head_dim": 128, "cross_attention_dim": 4096, "num_layers": 48, "activation_fn": "gelu-approximate", "qk_norm": "rms_norm_across_heads", "norm_elementwise_affine": False, "norm_eps": 1e-6, "caption_channels": 4096, "attention_bias": True, "attention_out_bias": True, } else: config = { "in_channels": 128, "out_channels": 128, "patch_size": 1, "patch_size_t": 1, "num_attention_heads": 32, "attention_head_dim": 64, "cross_attention_dim": 2048, "num_layers": 28, "activation_fn": "gelu-approximate", "qk_norm": "rms_norm_across_heads", "norm_elementwise_affine": False, "norm_eps": 1e-6, "caption_channels": 4096, "attention_bias": True, "attention_out_bias": True, } return config def get_vae_config(version: str) -> Dict[str, Any]: if version in ["0.9.0"]: config = { "in_channels": 3, "out_channels": 3, "latent_channels": 128, "block_out_channels": (128, 256, 512, 512), "down_block_types": ( "LTXVideoDownBlock3D", "LTXVideoDownBlock3D", "LTXVideoDownBlock3D", "LTXVideoDownBlock3D", ), "decoder_block_out_channels": (128, 256, 512, 512), "layers_per_block": (4, 3, 3, 3, 4), "decoder_layers_per_block": (4, 3, 3, 3, 4), "spatio_temporal_scaling": (True, 
True, True, False), "decoder_spatio_temporal_scaling": (True, True, True, False), "decoder_inject_noise": (False, False, False, False, False), "downsample_type": ("conv", "conv", "conv", "conv"), "upsample_residual": (False, False, False, False), "upsample_factor": (1, 1, 1, 1), "patch_size": 4, "patch_size_t": 1, "resnet_norm_eps": 1e-6, "scaling_factor": 1.0, "encoder_causal": True, "decoder_causal": False, "timestep_conditioning": False, } elif version in ["0.9.1"]: config = { "in_channels": 3, "out_channels": 3, "latent_channels": 128, "block_out_channels": (128, 256, 512, 512), "down_block_types": ( "LTXVideoDownBlock3D", "LTXVideoDownBlock3D", "LTXVideoDownBlock3D", "LTXVideoDownBlock3D", ), "decoder_block_out_channels": (256, 512, 1024), "layers_per_block": (4, 3, 3, 3, 4), "decoder_layers_per_block": (5, 6, 7, 8), "spatio_temporal_scaling": (True, True, True, False), "decoder_spatio_temporal_scaling": (True, True, True), "decoder_inject_noise": (True, True, True, False), "downsample_type": ("conv", "conv", "conv", "conv"), "upsample_residual": (True, True, True), "upsample_factor": (2, 2, 2), "timestep_conditioning": True, "patch_size": 4, "patch_size_t": 1, "resnet_norm_eps": 1e-6, "scaling_factor": 1.0, "encoder_causal": True, "decoder_causal": False, } VAE_KEYS_RENAME_DICT.update(VAE_091_RENAME_DICT) elif version in ["0.9.5"]: config = { "in_channels": 3, "out_channels": 3, "latent_channels": 128, "block_out_channels": (128, 256, 512, 1024, 2048), "down_block_types": ( "LTXVideo095DownBlock3D", "LTXVideo095DownBlock3D", "LTXVideo095DownBlock3D", "LTXVideo095DownBlock3D", ), "decoder_block_out_channels": (256, 512, 1024), "layers_per_block": (4, 6, 6, 2, 2), "decoder_layers_per_block": (5, 5, 5, 5), "spatio_temporal_scaling": (True, True, True, True), "decoder_spatio_temporal_scaling": (True, True, True), "decoder_inject_noise": (False, False, False, False), "downsample_type": ("spatial", "temporal", "spatiotemporal", "spatiotemporal"), "upsample_residual": (True, True, True), "upsample_factor": (2, 2, 2), "timestep_conditioning": True, "patch_size": 4, "patch_size_t": 1, "resnet_norm_eps": 1e-6, "scaling_factor": 1.0, "encoder_causal": True, "decoder_causal": False, "spatial_compression_ratio": 32, "temporal_compression_ratio": 8, } VAE_KEYS_RENAME_DICT.update(VAE_095_RENAME_DICT) elif version in ["0.9.7"]: config = { "in_channels": 3, "out_channels": 3, "latent_channels": 128, "block_out_channels": (128, 256, 512, 1024, 2048), "down_block_types": ( "LTXVideo095DownBlock3D", "LTXVideo095DownBlock3D", "LTXVideo095DownBlock3D", "LTXVideo095DownBlock3D", ), "decoder_block_out_channels": (256, 512, 1024), "layers_per_block": (4, 6, 6, 2, 2), "decoder_layers_per_block": (5, 5, 5, 5), "spatio_temporal_scaling": (True, True, True, True), "decoder_spatio_temporal_scaling": (True, True, True), "decoder_inject_noise": (False, False, False, False), "downsample_type": ("spatial", "temporal", "spatiotemporal", "spatiotemporal"), "upsample_residual": (True, True, True), "upsample_factor": (2, 2, 2), "timestep_conditioning": True, "patch_size": 4, "patch_size_t": 1, "resnet_norm_eps": 1e-6, "scaling_factor": 1.0, "encoder_causal": True, "decoder_causal": False, "spatial_compression_ratio": 32, "temporal_compression_ratio": 8, } VAE_KEYS_RENAME_DICT.update(VAE_095_RENAME_DICT) return config def get_spatial_latent_upsampler_config(version: str) -> Dict[str, Any]: if version == "0.9.7": config = { "in_channels": 128, "mid_channels": 512, "num_blocks_per_stage": 4, "dims": 3, "spatial_upsample": 
True, "temporal_upsample": False, } else: raise ValueError(f"Unsupported version: {version}") return config def get_args(): parser = argparse.ArgumentParser() parser.add_argument( "--transformer_ckpt_path", type=str, default=None, help="Path to original transformer checkpoint" ) parser.add_argument("--vae_ckpt_path", type=str, default=None, help="Path to original vae checkpoint") parser.add_argument( "--spatial_latent_upsampler_path", type=str, default=None, help="Path to original spatial latent upsampler checkpoint", ) parser.add_argument( "--text_encoder_cache_dir", type=str, default=None, help="Path to text encoder cache directory" ) parser.add_argument( "--typecast_text_encoder", action="store_true", default=False, help="Whether or not to apply fp16/bf16 precision to text_encoder", ) parser.add_argument("--save_pipeline", action="store_true") parser.add_argument("--output_path", type=str, required=True, help="Path where converted model should be saved") parser.add_argument("--dtype", default="fp32", help="Torch dtype to save the model in.") parser.add_argument( "--version", type=str, default="0.9.0", choices=["0.9.0", "0.9.1", "0.9.5", "0.9.7"], help="Version of the LTX model", ) return parser.parse_args() DTYPE_MAPPING = { "fp32": torch.float32, "fp16": torch.float16, "bf16": torch.bfloat16, } VARIANT_MAPPING = { "fp32": None, "fp16": "fp16", "bf16": "bf16", } if __name__ == "__main__": args = get_args() transformer = None dtype = DTYPE_MAPPING[args.dtype] variant = VARIANT_MAPPING[args.dtype] output_path = Path(args.output_path) if args.transformer_ckpt_path is not None: config = get_transformer_config(args.version) transformer: LTXVideoTransformer3DModel = convert_transformer(args.transformer_ckpt_path, config, dtype) if not args.save_pipeline: transformer.save_pretrained( output_path / "transformer", safe_serialization=True, max_shard_size="5GB", variant=variant ) if args.vae_ckpt_path is not None: config = get_vae_config(args.version) vae: AutoencoderKLLTXVideo = convert_vae(args.vae_ckpt_path, config, dtype) if not args.save_pipeline: vae.save_pretrained(output_path / "vae", safe_serialization=True, max_shard_size="5GB", variant=variant) if args.spatial_latent_upsampler_path is not None: config = get_spatial_latent_upsampler_config(args.version) latent_upsampler: LTXLatentUpsamplerModel = convert_spatial_latent_upsampler( args.spatial_latent_upsampler_path, config, dtype ) if not args.save_pipeline: latent_upsampler.save_pretrained( output_path / "latent_upsampler", safe_serialization=True, max_shard_size="5GB", variant=variant ) if args.save_pipeline: text_encoder_id = "google/t5-v1_1-xxl" tokenizer = T5Tokenizer.from_pretrained(text_encoder_id, model_max_length=TOKENIZER_MAX_LENGTH) text_encoder = T5EncoderModel.from_pretrained(text_encoder_id, cache_dir=args.text_encoder_cache_dir) if args.typecast_text_encoder: text_encoder = text_encoder.to(dtype=dtype) # Apparently, the conversion does not work anymore without this :shrug: for param in text_encoder.parameters(): param.data = param.data.contiguous() if args.version in ["0.9.5", "0.9.7"]: scheduler = FlowMatchEulerDiscreteScheduler(use_dynamic_shifting=False) else: scheduler = FlowMatchEulerDiscreteScheduler( use_dynamic_shifting=True, base_shift=0.95, max_shift=2.05, base_image_seq_len=1024, max_image_seq_len=4096, shift_terminal=0.1, ) if args.version in ["0.9.0", "0.9.1", "0.9.5"]: pipe = LTXPipeline( scheduler=scheduler, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, transformer=transformer, ) 
pipe.save_pretrained( output_path.as_posix(), safe_serialization=True, variant=variant, max_shard_size="5GB" ) elif args.version in ["0.9.7"]: pipe = LTXConditionPipeline( scheduler=scheduler, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, transformer=transformer, ) pipe_upsample = LTXLatentUpsamplePipeline( vae=vae, latent_upsampler=latent_upsampler, ) pipe.save_pretrained( (output_path / "ltx_pipeline").as_posix(), safe_serialization=True, variant=variant, max_shard_size="5GB", ) pipe_upsample.save_pretrained( (output_path / "ltx_upsample_pipeline").as_posix(), safe_serialization=True, variant=variant, max_shard_size="5GB", ) else: raise ValueError(f"Unsupported version: {args.version}")
diffusers/scripts/convert_ltx_to_diffusers.py/0
{ "file_path": "diffusers/scripts/convert_ltx_to_diffusers.py", "repo_id": "diffusers", "token_count": 9469 }
154
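A sketch of how the pipeline saved with `--save_pipeline` might be used afterwards; the output directory, prompt, frame count, and dtype are assumptions rather than part of the conversion script:

```python
import torch

from diffusers import LTXPipeline
from diffusers.utils import export_to_video

# "./ltx-converted" stands in for the --output_path used together with --save_pipeline (assumption).
pipe = LTXPipeline.from_pretrained("./ltx-converted", torch_dtype=torch.bfloat16)
pipe.to("cuda")

# Generate a short clip and write it to disk.
video = pipe(prompt="A chimpanzee typing on an old typewriter", num_frames=49).frames[0]
export_to_video(video, "output.mp4", fps=24)
```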
#!/usr/bin/env python from __future__ import annotations import argparse from contextlib import nullcontext import torch from accelerate import init_empty_weights from diffusers import ( SanaControlNetModel, ) from diffusers.models.modeling_utils import load_model_dict_into_meta from diffusers.utils.import_utils import is_accelerate_available CTX = init_empty_weights if is_accelerate_available else nullcontext def main(args): file_path = args.orig_ckpt_path all_state_dict = torch.load(file_path, weights_only=True) state_dict = all_state_dict.pop("state_dict") converted_state_dict = {} # Patch embeddings. converted_state_dict["patch_embed.proj.weight"] = state_dict.pop("x_embedder.proj.weight") converted_state_dict["patch_embed.proj.bias"] = state_dict.pop("x_embedder.proj.bias") # Caption projection. converted_state_dict["caption_projection.linear_1.weight"] = state_dict.pop("y_embedder.y_proj.fc1.weight") converted_state_dict["caption_projection.linear_1.bias"] = state_dict.pop("y_embedder.y_proj.fc1.bias") converted_state_dict["caption_projection.linear_2.weight"] = state_dict.pop("y_embedder.y_proj.fc2.weight") converted_state_dict["caption_projection.linear_2.bias"] = state_dict.pop("y_embedder.y_proj.fc2.bias") # AdaLN-single LN converted_state_dict["time_embed.emb.timestep_embedder.linear_1.weight"] = state_dict.pop( "t_embedder.mlp.0.weight" ) converted_state_dict["time_embed.emb.timestep_embedder.linear_1.bias"] = state_dict.pop("t_embedder.mlp.0.bias") converted_state_dict["time_embed.emb.timestep_embedder.linear_2.weight"] = state_dict.pop( "t_embedder.mlp.2.weight" ) converted_state_dict["time_embed.emb.timestep_embedder.linear_2.bias"] = state_dict.pop("t_embedder.mlp.2.bias") # Shared norm. converted_state_dict["time_embed.linear.weight"] = state_dict.pop("t_block.1.weight") converted_state_dict["time_embed.linear.bias"] = state_dict.pop("t_block.1.bias") # y norm converted_state_dict["caption_norm.weight"] = state_dict.pop("attention_y_norm.weight") # Positional embedding interpolation scale. interpolation_scale = {512: None, 1024: None, 2048: 1.0, 4096: 2.0} # ControlNet Input Projection. converted_state_dict["input_block.weight"] = state_dict.pop("controlnet.0.before_proj.weight") converted_state_dict["input_block.bias"] = state_dict.pop("controlnet.0.before_proj.bias") for depth in range(7): # Transformer blocks. converted_state_dict[f"transformer_blocks.{depth}.scale_shift_table"] = state_dict.pop( f"controlnet.{depth}.copied_block.scale_shift_table" ) # Linear Attention is all you need 🤘 # Self attention. q, k, v = torch.chunk(state_dict.pop(f"controlnet.{depth}.copied_block.attn.qkv.weight"), 3, dim=0) converted_state_dict[f"transformer_blocks.{depth}.attn1.to_q.weight"] = q converted_state_dict[f"transformer_blocks.{depth}.attn1.to_k.weight"] = k converted_state_dict[f"transformer_blocks.{depth}.attn1.to_v.weight"] = v # Projection. converted_state_dict[f"transformer_blocks.{depth}.attn1.to_out.0.weight"] = state_dict.pop( f"controlnet.{depth}.copied_block.attn.proj.weight" ) converted_state_dict[f"transformer_blocks.{depth}.attn1.to_out.0.bias"] = state_dict.pop( f"controlnet.{depth}.copied_block.attn.proj.bias" ) # Feed-forward. 
converted_state_dict[f"transformer_blocks.{depth}.ff.conv_inverted.weight"] = state_dict.pop( f"controlnet.{depth}.copied_block.mlp.inverted_conv.conv.weight" ) converted_state_dict[f"transformer_blocks.{depth}.ff.conv_inverted.bias"] = state_dict.pop( f"controlnet.{depth}.copied_block.mlp.inverted_conv.conv.bias" ) converted_state_dict[f"transformer_blocks.{depth}.ff.conv_depth.weight"] = state_dict.pop( f"controlnet.{depth}.copied_block.mlp.depth_conv.conv.weight" ) converted_state_dict[f"transformer_blocks.{depth}.ff.conv_depth.bias"] = state_dict.pop( f"controlnet.{depth}.copied_block.mlp.depth_conv.conv.bias" ) converted_state_dict[f"transformer_blocks.{depth}.ff.conv_point.weight"] = state_dict.pop( f"controlnet.{depth}.copied_block.mlp.point_conv.conv.weight" ) # Cross-attention. q = state_dict.pop(f"controlnet.{depth}.copied_block.cross_attn.q_linear.weight") q_bias = state_dict.pop(f"controlnet.{depth}.copied_block.cross_attn.q_linear.bias") k, v = torch.chunk(state_dict.pop(f"controlnet.{depth}.copied_block.cross_attn.kv_linear.weight"), 2, dim=0) k_bias, v_bias = torch.chunk( state_dict.pop(f"controlnet.{depth}.copied_block.cross_attn.kv_linear.bias"), 2, dim=0 ) converted_state_dict[f"transformer_blocks.{depth}.attn2.to_q.weight"] = q converted_state_dict[f"transformer_blocks.{depth}.attn2.to_q.bias"] = q_bias converted_state_dict[f"transformer_blocks.{depth}.attn2.to_k.weight"] = k converted_state_dict[f"transformer_blocks.{depth}.attn2.to_k.bias"] = k_bias converted_state_dict[f"transformer_blocks.{depth}.attn2.to_v.weight"] = v converted_state_dict[f"transformer_blocks.{depth}.attn2.to_v.bias"] = v_bias converted_state_dict[f"transformer_blocks.{depth}.attn2.to_out.0.weight"] = state_dict.pop( f"controlnet.{depth}.copied_block.cross_attn.proj.weight" ) converted_state_dict[f"transformer_blocks.{depth}.attn2.to_out.0.bias"] = state_dict.pop( f"controlnet.{depth}.copied_block.cross_attn.proj.bias" ) # ControlNet After Projection converted_state_dict[f"controlnet_blocks.{depth}.weight"] = state_dict.pop( f"controlnet.{depth}.after_proj.weight" ) converted_state_dict[f"controlnet_blocks.{depth}.bias"] = state_dict.pop(f"controlnet.{depth}.after_proj.bias") # ControlNet with CTX(): controlnet = SanaControlNetModel( num_attention_heads=model_kwargs[args.model_type]["num_attention_heads"], attention_head_dim=model_kwargs[args.model_type]["attention_head_dim"], num_layers=model_kwargs[args.model_type]["num_layers"], num_cross_attention_heads=model_kwargs[args.model_type]["num_cross_attention_heads"], cross_attention_head_dim=model_kwargs[args.model_type]["cross_attention_head_dim"], cross_attention_dim=model_kwargs[args.model_type]["cross_attention_dim"], caption_channels=2304, sample_size=args.image_size // 32, interpolation_scale=interpolation_scale[args.image_size], ) if is_accelerate_available(): load_model_dict_into_meta(controlnet, converted_state_dict) else: controlnet.load_state_dict(converted_state_dict, strict=True, assign=True) num_model_params = sum(p.numel() for p in controlnet.parameters()) print(f"Total number of controlnet parameters: {num_model_params}") controlnet = controlnet.to(weight_dtype) controlnet.load_state_dict(converted_state_dict, strict=True) print(f"Saving Sana ControlNet in Diffusers format in {args.dump_path}.") controlnet.save_pretrained(args.dump_path) DTYPE_MAPPING = { "fp32": torch.float32, "fp16": torch.float16, "bf16": torch.bfloat16, } VARIANT_MAPPING = { "fp32": None, "fp16": "fp16", "bf16": "bf16", } if __name__ == "__main__": parser = 
argparse.ArgumentParser() parser.add_argument( "--orig_ckpt_path", default=None, type=str, required=True, help="Path to the checkpoint to convert." ) parser.add_argument( "--image_size", default=1024, type=int, choices=[512, 1024, 2048, 4096], required=False, help="Image size of pretrained model, 512, 1024, 2048 or 4096.", ) parser.add_argument( "--model_type", default="SanaMS_1600M_P1_ControlNet_D7", type=str, choices=["SanaMS_1600M_P1_ControlNet_D7", "SanaMS_600M_P1_ControlNet_D7"], ) parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output pipeline.") parser.add_argument("--dtype", default="fp16", type=str, choices=["fp32", "fp16", "bf16"], help="Weight dtype.") args = parser.parse_args() model_kwargs = { "SanaMS_1600M_P1_ControlNet_D7": { "num_attention_heads": 70, "attention_head_dim": 32, "num_cross_attention_heads": 20, "cross_attention_head_dim": 112, "cross_attention_dim": 2240, "num_layers": 7, }, "SanaMS_600M_P1_ControlNet_D7": { "num_attention_heads": 36, "attention_head_dim": 32, "num_cross_attention_heads": 16, "cross_attention_head_dim": 72, "cross_attention_dim": 1152, "num_layers": 7, }, } device = "cuda" if torch.cuda.is_available() else "cpu" weight_dtype = DTYPE_MAPPING[args.dtype] variant = VARIANT_MAPPING[args.dtype] main(args)
diffusers/scripts/convert_sana_controlnet_to_diffusers.py/0
{ "file_path": "diffusers/scripts/convert_sana_controlnet_to_diffusers.py", "repo_id": "diffusers", "token_count": 4103 }
155
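The converted weights can be loaded back as a `SanaControlNetModel`; a minimal sketch, with the dump path and dtype as assumptions:

```python
import torch

from diffusers import SanaControlNetModel

# "./sana-controlnet" stands in for the --dump_path given to the script above (assumption).
controlnet = SanaControlNetModel.from_pretrained("./sana-controlnet", torch_dtype=torch.float16)

# Sanity check: the D7 variants converted above have 7 transformer blocks.
print(f"parameters: {sum(p.numel() for p in controlnet.parameters()):,}")
```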
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse from pathlib import Path import torch from packaging import version from torch.onnx import export from diffusers import AutoencoderKL is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11") def onnx_export( model, model_args: tuple, output_path: Path, ordered_input_names, output_names, dynamic_axes, opset, use_external_data_format=False, ): output_path.parent.mkdir(parents=True, exist_ok=True) # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11, # so we check the torch version for backwards compatibility if is_torch_less_than_1_11: export( model, model_args, f=output_path.as_posix(), input_names=ordered_input_names, output_names=output_names, dynamic_axes=dynamic_axes, do_constant_folding=True, use_external_data_format=use_external_data_format, enable_onnx_checker=True, opset_version=opset, ) else: export( model, model_args, f=output_path.as_posix(), input_names=ordered_input_names, output_names=output_names, dynamic_axes=dynamic_axes, do_constant_folding=True, opset_version=opset, ) @torch.no_grad() def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False): dtype = torch.float16 if fp16 else torch.float32 if fp16 and torch.cuda.is_available(): device = "cuda" elif fp16 and not torch.cuda.is_available(): raise ValueError("`float16` model export is only supported on GPUs with CUDA") else: device = "cpu" output_path = Path(output_path) # VAE DECODER vae_decoder = AutoencoderKL.from_pretrained(model_path + "/vae") vae_latent_channels = vae_decoder.config.latent_channels # forward only through the decoder part vae_decoder.forward = vae_decoder.decode onnx_export( vae_decoder, model_args=( torch.randn(1, vae_latent_channels, 25, 25).to(device=device, dtype=dtype), False, ), output_path=output_path / "vae_decoder" / "model.onnx", ordered_input_names=["latent_sample", "return_dict"], output_names=["sample"], dynamic_axes={ "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"}, }, opset=opset, ) del vae_decoder if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--model_path", type=str, required=True, help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).", ) parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.") parser.add_argument( "--opset", default=14, type=int, help="The version of the ONNX operator set to use.", ) parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode") args = parser.parse_args() print(args.output_path) convert_models(args.model_path, args.output_path, args.opset, args.fp16) print("SD: Done: ONNX")
diffusers/scripts/convert_vae_diff_to_onnx.py/0
{ "file_path": "diffusers/scripts/convert_vae_diff_to_onnx.py", "repo_id": "diffusers", "token_count": 1684 }
156
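A sketch of running the exported decoder with ONNX Runtime; the output location, latent channel count, and execution provider are assumptions (the input/output names follow the export call above, with the constant `return_dict` argument typically folded away during export):

```python
import numpy as np
import onnxruntime as ort

# Path mirrors the `output_path / "vae_decoder" / "model.onnx"` layout used by the script (assumption).
session = ort.InferenceSession("converted/vae_decoder/model.onnx", providers=["CPUExecutionProvider"])

# 4 latent channels is typical for Stable Diffusion VAEs; the script reads the real value from the VAE config.
latents = np.random.randn(1, 4, 25, 25).astype(np.float32)
(sample,) = session.run(["sample"], {"latent_sample": latents})
print(sample.shape)
```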
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Usage example: diffusers-cli fp16_safetensors --ckpt_id=openai/shap-e --fp16 --use_safetensors """ import glob import json import warnings from argparse import ArgumentParser, Namespace from importlib import import_module import huggingface_hub import torch from huggingface_hub import hf_hub_download from packaging import version from ..utils import logging from . import BaseDiffusersCLICommand def conversion_command_factory(args: Namespace): if args.use_auth_token: warnings.warn( "The `--use_auth_token` flag is deprecated and will be removed in a future version. Authentication is now" " handled automatically if user is logged in." ) return FP16SafetensorsCommand(args.ckpt_id, args.fp16, args.use_safetensors) class FP16SafetensorsCommand(BaseDiffusersCLICommand): @staticmethod def register_subcommand(parser: ArgumentParser): conversion_parser = parser.add_parser("fp16_safetensors") conversion_parser.add_argument( "--ckpt_id", type=str, help="Repo id of the checkpoints on which to run the conversion. Example: 'openai/shap-e'.", ) conversion_parser.add_argument( "--fp16", action="store_true", help="If serializing the variables in FP16 precision." ) conversion_parser.add_argument( "--use_safetensors", action="store_true", help="If serializing in the safetensors format." ) conversion_parser.add_argument( "--use_auth_token", action="store_true", help="When working with checkpoints having private visibility. When used `hf auth login` needs to be run beforehand.", ) conversion_parser.set_defaults(func=conversion_command_factory) def __init__(self, ckpt_id: str, fp16: bool, use_safetensors: bool): self.logger = logging.get_logger("diffusers-cli/fp16_safetensors") self.ckpt_id = ckpt_id self.local_ckpt_dir = f"/tmp/{ckpt_id}" self.fp16 = fp16 self.use_safetensors = use_safetensors if not self.use_safetensors and not self.fp16: raise NotImplementedError( "When `use_safetensors` and `fp16` both are False, then this command is of no use." ) def run(self): if version.parse(huggingface_hub.__version__) < version.parse("0.9.0"): raise ImportError( "The huggingface_hub version must be >= 0.9.0 to use this command. Please update your huggingface_hub" " installation." ) else: from huggingface_hub import create_commit from huggingface_hub._commit_api import CommitOperationAdd model_index = hf_hub_download(repo_id=self.ckpt_id, filename="model_index.json") with open(model_index, "r") as f: pipeline_class_name = json.load(f)["_class_name"] pipeline_class = getattr(import_module("diffusers"), pipeline_class_name) self.logger.info(f"Pipeline class imported: {pipeline_class_name}.") # Load the appropriate pipeline. We could have use `DiffusionPipeline` # here, but just to avoid any rough edge cases. 
pipeline = pipeline_class.from_pretrained( self.ckpt_id, torch_dtype=torch.float16 if self.fp16 else torch.float32 ) pipeline.save_pretrained( self.local_ckpt_dir, safe_serialization=True if self.use_safetensors else False, variant="fp16" if self.fp16 else None, ) self.logger.info(f"Pipeline locally saved to {self.local_ckpt_dir}.") # Fetch all the paths. if self.fp16: modified_paths = glob.glob(f"{self.local_ckpt_dir}/*/*.fp16.*") elif self.use_safetensors: modified_paths = glob.glob(f"{self.local_ckpt_dir}/*/*.safetensors") # Prepare for the PR. commit_message = f"Serialize variables with FP16: {self.fp16} and safetensors: {self.use_safetensors}." operations = [] for path in modified_paths: operations.append(CommitOperationAdd(path_in_repo="/".join(path.split("/")[4:]), path_or_fileobj=path)) # Open the PR. commit_description = ( "Variables converted by the [`diffusers`' `fp16_safetensors`" " CLI](https://github.com/huggingface/diffusers/blob/main/src/diffusers/commands/fp16_safetensors.py)." ) hub_pr_url = create_commit( repo_id=self.ckpt_id, operations=operations, commit_message=commit_message, commit_description=commit_description, repo_type="model", create_pr=True, ).pr_url self.logger.info(f"PR created here: {hub_pr_url}.")
diffusers/src/diffusers/commands/fp16_safetensors.py/0
{ "file_path": "diffusers/src/diffusers/commands/fp16_safetensors.py", "repo_id": "diffusers", "token_count": 2245 }
157
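Once the PR opened by the command has been merged, the converted weights can be loaded by requesting the matching variant; a sketch, using the repo id from the usage example in the docstring:

```python
import torch

from diffusers import DiffusionPipeline

# Loads the fp16 safetensors weights serialized by `diffusers-cli fp16_safetensors`
# (assumes the PR created by the command has been merged into the repo).
pipe = DiffusionPipeline.from_pretrained(
    "openai/shap-e", variant="fp16", torch_dtype=torch.float16, use_safetensors=True
)
```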
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import math
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union

import torch

from ..configuration_utils import register_to_config
from ..hooks import HookRegistry, LayerSkipConfig
from ..hooks.layer_skip import _apply_layer_skip_hook
from .guider_utils import BaseGuidance, rescale_noise_cfg


if TYPE_CHECKING:
    from ..modular_pipelines.modular_pipeline import BlockState


class SkipLayerGuidance(BaseGuidance):
    """
    Skip Layer Guidance (SLG): https://github.com/Stability-AI/sd3.5
    Spatio-Temporal Guidance (STG): https://huggingface.co/papers/2411.18664

    SLG was introduced by StabilityAI for improving structure and anatomy coherence in generated images. It works by
    skipping the forward pass of specified transformer blocks during the denoising process on an additional
    conditional batch of data, apart from the conditional and unconditional batches already used in CFG
    ([~guiders.classifier_free_guidance.ClassifierFreeGuidance]), and then scaling and shifting the CFG predictions
    based on the difference between conditional without skipping and conditional with skipping predictions.

    The intuition behind SLG can be thought of as moving the CFG predicted distribution estimates further away from
    worse versions of the conditional distribution estimates (because skipping layers is equivalent to using a worse
    version of the model for the conditional prediction).

    STG is an improvement and follow-up work combining ideas from SLG, PAG and similar techniques for improving
    generation quality in video diffusion models.

    Additional reading:
    - [Guiding a Diffusion Model with a Bad Version of Itself](https://huggingface.co/papers/2406.02507)

    The values for `skip_layer_guidance_scale`, `skip_layer_guidance_start`, and `skip_layer_guidance_stop` are
    defaulted to the recommendations by StabilityAI for Stable Diffusion 3.5 Medium.

    Args:
        guidance_scale (`float`, defaults to `7.5`):
            The scale parameter for classifier-free guidance. Higher values result in stronger conditioning on the
            text prompt, while lower values allow for more freedom in generation. Higher values may lead to saturation
            and deterioration of image quality.
        skip_layer_guidance_scale (`float`, defaults to `2.8`):
            The scale parameter for skip layer guidance. Anatomy and structure coherence may improve with higher
            values, but it may also lead to overexposure and saturation.
        skip_layer_guidance_start (`float`, defaults to `0.01`):
            The fraction of the total number of denoising steps after which skip layer guidance starts.
        skip_layer_guidance_stop (`float`, defaults to `0.2`):
            The fraction of the total number of denoising steps after which skip layer guidance stops.
        skip_layer_guidance_layers (`int` or `List[int]`, *optional*):
            The layer indices to apply skip layer guidance to. Can be a single integer or a list of integers. If not
            provided, `skip_layer_config` must be provided. The recommended values are `[7, 8, 9]` for Stable
            Diffusion 3.5 Medium.
skip_layer_config (`LayerSkipConfig` or `List[LayerSkipConfig]`, *optional*): The configuration for the skip layer guidance. Can be a single `LayerSkipConfig` or a list of `LayerSkipConfig`. If not provided, `skip_layer_guidance_layers` must be provided. guidance_rescale (`float`, defaults to `0.0`): The rescale factor applied to the noise predictions. This is used to improve image quality and fix overexposure. Based on Section 3.4 from [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://huggingface.co/papers/2305.08891). use_original_formulation (`bool`, defaults to `False`): Whether to use the original formulation of classifier-free guidance as proposed in the paper. By default, we use the diffusers-native implementation that has been in the codebase for a long time. See [~guiders.classifier_free_guidance.ClassifierFreeGuidance] for more details. start (`float`, defaults to `0.01`): The fraction of the total number of denoising steps after which guidance starts. stop (`float`, defaults to `0.2`): The fraction of the total number of denoising steps after which guidance stops. """ _input_predictions = ["pred_cond", "pred_uncond", "pred_cond_skip"] @register_to_config def __init__( self, guidance_scale: float = 7.5, skip_layer_guidance_scale: float = 2.8, skip_layer_guidance_start: float = 0.01, skip_layer_guidance_stop: float = 0.2, skip_layer_guidance_layers: Optional[Union[int, List[int]]] = None, skip_layer_config: Union[LayerSkipConfig, List[LayerSkipConfig], Dict[str, Any]] = None, guidance_rescale: float = 0.0, use_original_formulation: bool = False, start: float = 0.0, stop: float = 1.0, ): super().__init__(start, stop) self.guidance_scale = guidance_scale self.skip_layer_guidance_scale = skip_layer_guidance_scale self.skip_layer_guidance_start = skip_layer_guidance_start self.skip_layer_guidance_stop = skip_layer_guidance_stop self.guidance_rescale = guidance_rescale self.use_original_formulation = use_original_formulation if not (0.0 <= skip_layer_guidance_start < 1.0): raise ValueError( f"Expected `skip_layer_guidance_start` to be between 0.0 and 1.0, but got {skip_layer_guidance_start}." ) if not (skip_layer_guidance_start <= skip_layer_guidance_stop <= 1.0): raise ValueError( f"Expected `skip_layer_guidance_stop` to be between 0.0 and 1.0, but got {skip_layer_guidance_stop}." ) if skip_layer_guidance_layers is None and skip_layer_config is None: raise ValueError( "Either `skip_layer_guidance_layers` or `skip_layer_config` must be provided to enable Skip Layer Guidance." ) if skip_layer_guidance_layers is not None and skip_layer_config is not None: raise ValueError("Only one of `skip_layer_guidance_layers` or `skip_layer_config` can be provided.") if skip_layer_guidance_layers is not None: if isinstance(skip_layer_guidance_layers, int): skip_layer_guidance_layers = [skip_layer_guidance_layers] if not isinstance(skip_layer_guidance_layers, list): raise ValueError( f"Expected `skip_layer_guidance_layers` to be an int or a list of ints, but got {type(skip_layer_guidance_layers)}." ) skip_layer_config = [LayerSkipConfig(layer, fqn="auto") for layer in skip_layer_guidance_layers] if isinstance(skip_layer_config, dict): skip_layer_config = LayerSkipConfig.from_dict(skip_layer_config) if isinstance(skip_layer_config, LayerSkipConfig): skip_layer_config = [skip_layer_config] if not isinstance(skip_layer_config, list): raise ValueError( f"Expected `skip_layer_config` to be a LayerSkipConfig or a list of LayerSkipConfig, but got {type(skip_layer_config)}." 
) elif isinstance(next(iter(skip_layer_config), None), dict): skip_layer_config = [LayerSkipConfig.from_dict(config) for config in skip_layer_config] self.skip_layer_config = skip_layer_config self._skip_layer_hook_names = [f"SkipLayerGuidance_{i}" for i in range(len(self.skip_layer_config))] def prepare_models(self, denoiser: torch.nn.Module) -> None: self._count_prepared += 1 if self._is_slg_enabled() and self.is_conditional and self._count_prepared > 1: for name, config in zip(self._skip_layer_hook_names, self.skip_layer_config): _apply_layer_skip_hook(denoiser, config, name=name) def cleanup_models(self, denoiser: torch.nn.Module) -> None: if self._is_slg_enabled() and self.is_conditional and self._count_prepared > 1: registry = HookRegistry.check_if_exists_or_initialize(denoiser) # Remove the hooks after inference for hook_name in self._skip_layer_hook_names: registry.remove_hook(hook_name, recurse=True) def prepare_inputs( self, data: "BlockState", input_fields: Optional[Dict[str, Union[str, Tuple[str, str]]]] = None ) -> List["BlockState"]: if input_fields is None: input_fields = self._input_fields if self.num_conditions == 1: tuple_indices = [0] input_predictions = ["pred_cond"] elif self.num_conditions == 2: tuple_indices = [0, 1] input_predictions = ( ["pred_cond", "pred_uncond"] if self._is_cfg_enabled() else ["pred_cond", "pred_cond_skip"] ) else: tuple_indices = [0, 1, 0] input_predictions = ["pred_cond", "pred_uncond", "pred_cond_skip"] data_batches = [] for i in range(self.num_conditions): data_batch = self._prepare_batch(input_fields, data, tuple_indices[i], input_predictions[i]) data_batches.append(data_batch) return data_batches def forward( self, pred_cond: torch.Tensor, pred_uncond: Optional[torch.Tensor] = None, pred_cond_skip: Optional[torch.Tensor] = None, ) -> torch.Tensor: pred = None if not self._is_cfg_enabled() and not self._is_slg_enabled(): pred = pred_cond elif not self._is_cfg_enabled(): shift = pred_cond - pred_cond_skip pred = pred_cond if self.use_original_formulation else pred_cond_skip pred = pred + self.skip_layer_guidance_scale * shift elif not self._is_slg_enabled(): shift = pred_cond - pred_uncond pred = pred_cond if self.use_original_formulation else pred_uncond pred = pred + self.guidance_scale * shift else: shift = pred_cond - pred_uncond shift_skip = pred_cond - pred_cond_skip pred = pred_cond if self.use_original_formulation else pred_uncond pred = pred + self.guidance_scale * shift + self.skip_layer_guidance_scale * shift_skip if self.guidance_rescale > 0.0: pred = rescale_noise_cfg(pred, pred_cond, self.guidance_rescale) return pred, {} @property def is_conditional(self) -> bool: return self._count_prepared == 1 or self._count_prepared == 3 @property def num_conditions(self) -> int: num_conditions = 1 if self._is_cfg_enabled(): num_conditions += 1 if self._is_slg_enabled(): num_conditions += 1 return num_conditions def _is_cfg_enabled(self) -> bool: if not self._enabled: return False is_within_range = True if self._num_inference_steps is not None: skip_start_step = int(self._start * self._num_inference_steps) skip_stop_step = int(self._stop * self._num_inference_steps) is_within_range = skip_start_step <= self._step < skip_stop_step is_close = False if self.use_original_formulation: is_close = math.isclose(self.guidance_scale, 0.0) else: is_close = math.isclose(self.guidance_scale, 1.0) return is_within_range and not is_close def _is_slg_enabled(self) -> bool: if not self._enabled: return False is_within_range = True if 
self._num_inference_steps is not None: skip_start_step = int(self.skip_layer_guidance_start * self._num_inference_steps) skip_stop_step = int(self.skip_layer_guidance_stop * self._num_inference_steps) is_within_range = skip_start_step < self._step < skip_stop_step is_zero = math.isclose(self.skip_layer_guidance_scale, 0.0) return is_within_range and not is_zero
diffusers/src/diffusers/guiders/skip_layer_guidance.py/0
{ "file_path": "diffusers/src/diffusers/guiders/skip_layer_guidance.py", "repo_id": "diffusers", "token_count": 5103 }
158
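A minimal construction sketch using the Stable Diffusion 3.5 Medium defaults mentioned in the docstring above; wiring the guider into a modular pipeline and denoiser is out of scope for this sketch:

```python
from diffusers.guiders.skip_layer_guidance import SkipLayerGuidance

# Layers [7, 8, 9] follow the recommendation for SD 3.5 Medium quoted in the docstring above;
# the remaining values are the constructor defaults shown in the source.
guider = SkipLayerGuidance(
    guidance_scale=7.5,
    skip_layer_guidance_scale=2.8,
    skip_layer_guidance_start=0.01,
    skip_layer_guidance_stop=0.2,
    skip_layer_guidance_layers=[7, 8, 9],
)
```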
from typing import TYPE_CHECKING from ..utils import DIFFUSERS_SLOW_IMPORT, _LazyModule, deprecate from ..utils.import_utils import is_peft_available, is_torch_available, is_transformers_available def text_encoder_lora_state_dict(text_encoder): deprecate( "text_encoder_load_state_dict in `models`", "0.27.0", "`text_encoder_lora_state_dict` is deprecated and will be removed in 0.27.0. Make sure to retrieve the weights using `get_peft_model`. See https://huggingface.co/docs/peft/v0.6.2/en/quicktour#peftmodel for more information.", ) state_dict = {} for name, module in text_encoder_attn_modules(text_encoder): for k, v in module.q_proj.lora_linear_layer.state_dict().items(): state_dict[f"{name}.q_proj.lora_linear_layer.{k}"] = v for k, v in module.k_proj.lora_linear_layer.state_dict().items(): state_dict[f"{name}.k_proj.lora_linear_layer.{k}"] = v for k, v in module.v_proj.lora_linear_layer.state_dict().items(): state_dict[f"{name}.v_proj.lora_linear_layer.{k}"] = v for k, v in module.out_proj.lora_linear_layer.state_dict().items(): state_dict[f"{name}.out_proj.lora_linear_layer.{k}"] = v return state_dict if is_transformers_available(): def text_encoder_attn_modules(text_encoder): deprecate( "text_encoder_attn_modules in `models`", "0.27.0", "`text_encoder_lora_state_dict` is deprecated and will be removed in 0.27.0. Make sure to retrieve the weights using `get_peft_model`. See https://huggingface.co/docs/peft/v0.6.2/en/quicktour#peftmodel for more information.", ) from transformers import CLIPTextModel, CLIPTextModelWithProjection attn_modules = [] if isinstance(text_encoder, (CLIPTextModel, CLIPTextModelWithProjection)): for i, layer in enumerate(text_encoder.text_model.encoder.layers): name = f"text_model.encoder.layers.{i}.self_attn" mod = layer.self_attn attn_modules.append((name, mod)) else: raise ValueError(f"do not know how to get attention modules for: {text_encoder.__class__.__name__}") return attn_modules _import_structure = {} if is_torch_available(): _import_structure["single_file_model"] = ["FromOriginalModelMixin"] _import_structure["transformer_flux"] = ["FluxTransformer2DLoadersMixin"] _import_structure["transformer_sd3"] = ["SD3Transformer2DLoadersMixin"] _import_structure["unet"] = ["UNet2DConditionLoadersMixin"] _import_structure["utils"] = ["AttnProcsLayers"] if is_transformers_available(): _import_structure["single_file"] = ["FromSingleFileMixin"] _import_structure["lora_pipeline"] = [ "AmusedLoraLoaderMixin", "StableDiffusionLoraLoaderMixin", "SD3LoraLoaderMixin", "AuraFlowLoraLoaderMixin", "StableDiffusionXLLoraLoaderMixin", "LTXVideoLoraLoaderMixin", "LoraLoaderMixin", "FluxLoraLoaderMixin", "CogVideoXLoraLoaderMixin", "CogView4LoraLoaderMixin", "Mochi1LoraLoaderMixin", "HunyuanVideoLoraLoaderMixin", "SanaLoraLoaderMixin", "Lumina2LoraLoaderMixin", "WanLoraLoaderMixin", "HiDreamImageLoraLoaderMixin", "SkyReelsV2LoraLoaderMixin", "QwenImageLoraLoaderMixin", ] _import_structure["textual_inversion"] = ["TextualInversionLoaderMixin"] _import_structure["ip_adapter"] = [ "IPAdapterMixin", "FluxIPAdapterMixin", "SD3IPAdapterMixin", "ModularIPAdapterMixin", ] _import_structure["peft"] = ["PeftAdapterMixin"] if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: if is_torch_available(): from .single_file_model import FromOriginalModelMixin from .transformer_flux import FluxTransformer2DLoadersMixin from .transformer_sd3 import SD3Transformer2DLoadersMixin from .unet import UNet2DConditionLoadersMixin from .utils import AttnProcsLayers if is_transformers_available(): from .ip_adapter 
import ( FluxIPAdapterMixin, IPAdapterMixin, ModularIPAdapterMixin, SD3IPAdapterMixin, ) from .lora_pipeline import ( AmusedLoraLoaderMixin, AuraFlowLoraLoaderMixin, CogVideoXLoraLoaderMixin, CogView4LoraLoaderMixin, FluxLoraLoaderMixin, HiDreamImageLoraLoaderMixin, HunyuanVideoLoraLoaderMixin, LoraLoaderMixin, LTXVideoLoraLoaderMixin, Lumina2LoraLoaderMixin, Mochi1LoraLoaderMixin, QwenImageLoraLoaderMixin, SanaLoraLoaderMixin, SD3LoraLoaderMixin, SkyReelsV2LoraLoaderMixin, StableDiffusionLoraLoaderMixin, StableDiffusionXLLoraLoaderMixin, WanLoraLoaderMixin, ) from .single_file import FromSingleFileMixin from .textual_inversion import TextualInversionLoaderMixin from .peft import PeftAdapterMixin else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
diffusers/src/diffusers/loaders/__init__.py/0
{ "file_path": "diffusers/src/diffusers/loaders/__init__.py", "repo_id": "diffusers", "token_count": 2647 }
159
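The `_LazyModule` registration above means submodules are only imported the first time one of their attributes is accessed; a small sketch of what that looks like from user code (assumes `transformers` is installed, since the LoRA mixins are only registered in that case):

```python
import diffusers.loaders as loaders

# Attribute access triggers the lazy import declared in _import_structure above.
mixin = loaders.StableDiffusionLoraLoaderMixin
print(mixin.__module__)  # resolves to the lora_pipeline submodule
```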
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ..utils import ( DIFFUSERS_SLOW_IMPORT, _LazyModule, is_flax_available, is_torch_available, ) _import_structure = {} if is_torch_available(): _import_structure["adapter"] = ["MultiAdapter", "T2IAdapter"] _import_structure["attention_dispatch"] = ["AttentionBackendName", "attention_backend"] _import_structure["auto_model"] = ["AutoModel"] _import_structure["autoencoders.autoencoder_asym_kl"] = ["AsymmetricAutoencoderKL"] _import_structure["autoencoders.autoencoder_dc"] = ["AutoencoderDC"] _import_structure["autoencoders.autoencoder_kl"] = ["AutoencoderKL"] _import_structure["autoencoders.autoencoder_kl_allegro"] = ["AutoencoderKLAllegro"] _import_structure["autoencoders.autoencoder_kl_cogvideox"] = ["AutoencoderKLCogVideoX"] _import_structure["autoencoders.autoencoder_kl_cosmos"] = ["AutoencoderKLCosmos"] _import_structure["autoencoders.autoencoder_kl_hunyuan_video"] = ["AutoencoderKLHunyuanVideo"] _import_structure["autoencoders.autoencoder_kl_ltx"] = ["AutoencoderKLLTXVideo"] _import_structure["autoencoders.autoencoder_kl_magvit"] = ["AutoencoderKLMagvit"] _import_structure["autoencoders.autoencoder_kl_mochi"] = ["AutoencoderKLMochi"] _import_structure["autoencoders.autoencoder_kl_qwenimage"] = ["AutoencoderKLQwenImage"] _import_structure["autoencoders.autoencoder_kl_temporal_decoder"] = ["AutoencoderKLTemporalDecoder"] _import_structure["autoencoders.autoencoder_kl_wan"] = ["AutoencoderKLWan"] _import_structure["autoencoders.autoencoder_oobleck"] = ["AutoencoderOobleck"] _import_structure["autoencoders.autoencoder_tiny"] = ["AutoencoderTiny"] _import_structure["autoencoders.consistency_decoder_vae"] = ["ConsistencyDecoderVAE"] _import_structure["autoencoders.vq_model"] = ["VQModel"] _import_structure["cache_utils"] = ["CacheMixin"] _import_structure["controlnets.controlnet"] = ["ControlNetModel"] _import_structure["controlnets.controlnet_flux"] = ["FluxControlNetModel", "FluxMultiControlNetModel"] _import_structure["controlnets.controlnet_hunyuan"] = [ "HunyuanDiT2DControlNetModel", "HunyuanDiT2DMultiControlNetModel", ] _import_structure["controlnets.controlnet_qwenimage"] = [ "QwenImageControlNetModel", "QwenImageMultiControlNetModel", ] _import_structure["controlnets.controlnet_sana"] = ["SanaControlNetModel"] _import_structure["controlnets.controlnet_sd3"] = ["SD3ControlNetModel", "SD3MultiControlNetModel"] _import_structure["controlnets.controlnet_sparsectrl"] = ["SparseControlNetModel"] _import_structure["controlnets.controlnet_union"] = ["ControlNetUnionModel"] _import_structure["controlnets.controlnet_xs"] = ["ControlNetXSAdapter", "UNetControlNetXSModel"] _import_structure["controlnets.multicontrolnet"] = ["MultiControlNetModel"] _import_structure["controlnets.multicontrolnet_union"] = ["MultiControlNetUnionModel"] _import_structure["embeddings"] = ["ImageProjection"] _import_structure["modeling_utils"] = ["ModelMixin"] 
_import_structure["transformers.auraflow_transformer_2d"] = ["AuraFlowTransformer2DModel"] _import_structure["transformers.cogvideox_transformer_3d"] = ["CogVideoXTransformer3DModel"] _import_structure["transformers.consisid_transformer_3d"] = ["ConsisIDTransformer3DModel"] _import_structure["transformers.dit_transformer_2d"] = ["DiTTransformer2DModel"] _import_structure["transformers.dual_transformer_2d"] = ["DualTransformer2DModel"] _import_structure["transformers.hunyuan_transformer_2d"] = ["HunyuanDiT2DModel"] _import_structure["transformers.latte_transformer_3d"] = ["LatteTransformer3DModel"] _import_structure["transformers.lumina_nextdit2d"] = ["LuminaNextDiT2DModel"] _import_structure["transformers.pixart_transformer_2d"] = ["PixArtTransformer2DModel"] _import_structure["transformers.prior_transformer"] = ["PriorTransformer"] _import_structure["transformers.sana_transformer"] = ["SanaTransformer2DModel"] _import_structure["transformers.stable_audio_transformer"] = ["StableAudioDiTModel"] _import_structure["transformers.t5_film_transformer"] = ["T5FilmDecoder"] _import_structure["transformers.transformer_2d"] = ["Transformer2DModel"] _import_structure["transformers.transformer_allegro"] = ["AllegroTransformer3DModel"] _import_structure["transformers.transformer_bria"] = ["BriaTransformer2DModel"] _import_structure["transformers.transformer_chroma"] = ["ChromaTransformer2DModel"] _import_structure["transformers.transformer_cogview3plus"] = ["CogView3PlusTransformer2DModel"] _import_structure["transformers.transformer_cogview4"] = ["CogView4Transformer2DModel"] _import_structure["transformers.transformer_cosmos"] = ["CosmosTransformer3DModel"] _import_structure["transformers.transformer_easyanimate"] = ["EasyAnimateTransformer3DModel"] _import_structure["transformers.transformer_flux"] = ["FluxTransformer2DModel"] _import_structure["transformers.transformer_hidream_image"] = ["HiDreamImageTransformer2DModel"] _import_structure["transformers.transformer_hunyuan_video"] = ["HunyuanVideoTransformer3DModel"] _import_structure["transformers.transformer_hunyuan_video_framepack"] = ["HunyuanVideoFramepackTransformer3DModel"] _import_structure["transformers.transformer_ltx"] = ["LTXVideoTransformer3DModel"] _import_structure["transformers.transformer_lumina2"] = ["Lumina2Transformer2DModel"] _import_structure["transformers.transformer_mochi"] = ["MochiTransformer3DModel"] _import_structure["transformers.transformer_omnigen"] = ["OmniGenTransformer2DModel"] _import_structure["transformers.transformer_qwenimage"] = ["QwenImageTransformer2DModel"] _import_structure["transformers.transformer_sd3"] = ["SD3Transformer2DModel"] _import_structure["transformers.transformer_skyreels_v2"] = ["SkyReelsV2Transformer3DModel"] _import_structure["transformers.transformer_temporal"] = ["TransformerTemporalModel"] _import_structure["transformers.transformer_wan"] = ["WanTransformer3DModel"] _import_structure["transformers.transformer_wan_vace"] = ["WanVACETransformer3DModel"] _import_structure["unets.unet_1d"] = ["UNet1DModel"] _import_structure["unets.unet_2d"] = ["UNet2DModel"] _import_structure["unets.unet_2d_condition"] = ["UNet2DConditionModel"] _import_structure["unets.unet_3d_condition"] = ["UNet3DConditionModel"] _import_structure["unets.unet_i2vgen_xl"] = ["I2VGenXLUNet"] _import_structure["unets.unet_kandinsky3"] = ["Kandinsky3UNet"] _import_structure["unets.unet_motion_model"] = ["MotionAdapter", "UNetMotionModel"] _import_structure["unets.unet_spatio_temporal_condition"] = 
["UNetSpatioTemporalConditionModel"] _import_structure["unets.unet_stable_cascade"] = ["StableCascadeUNet"] _import_structure["unets.uvit_2d"] = ["UVit2DModel"] if is_flax_available(): _import_structure["controlnets.controlnet_flax"] = ["FlaxControlNetModel"] _import_structure["unets.unet_2d_condition_flax"] = ["FlaxUNet2DConditionModel"] _import_structure["vae_flax"] = ["FlaxAutoencoderKL"] if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: if is_torch_available(): from .adapter import MultiAdapter, T2IAdapter from .attention_dispatch import AttentionBackendName, attention_backend from .auto_model import AutoModel from .autoencoders import ( AsymmetricAutoencoderKL, AutoencoderDC, AutoencoderKL, AutoencoderKLAllegro, AutoencoderKLCogVideoX, AutoencoderKLCosmos, AutoencoderKLHunyuanVideo, AutoencoderKLLTXVideo, AutoencoderKLMagvit, AutoencoderKLMochi, AutoencoderKLQwenImage, AutoencoderKLTemporalDecoder, AutoencoderKLWan, AutoencoderOobleck, AutoencoderTiny, ConsistencyDecoderVAE, VQModel, ) from .cache_utils import CacheMixin from .controlnets import ( ControlNetModel, ControlNetUnionModel, ControlNetXSAdapter, FluxControlNetModel, FluxMultiControlNetModel, HunyuanDiT2DControlNetModel, HunyuanDiT2DMultiControlNetModel, MultiControlNetModel, MultiControlNetUnionModel, QwenImageControlNetModel, QwenImageMultiControlNetModel, SanaControlNetModel, SD3ControlNetModel, SD3MultiControlNetModel, SparseControlNetModel, UNetControlNetXSModel, ) from .embeddings import ImageProjection from .modeling_utils import ModelMixin from .transformers import ( AllegroTransformer3DModel, AuraFlowTransformer2DModel, BriaTransformer2DModel, ChromaTransformer2DModel, CogVideoXTransformer3DModel, CogView3PlusTransformer2DModel, CogView4Transformer2DModel, ConsisIDTransformer3DModel, CosmosTransformer3DModel, DiTTransformer2DModel, DualTransformer2DModel, EasyAnimateTransformer3DModel, FluxTransformer2DModel, HiDreamImageTransformer2DModel, HunyuanDiT2DModel, HunyuanVideoFramepackTransformer3DModel, HunyuanVideoTransformer3DModel, LatteTransformer3DModel, LTXVideoTransformer3DModel, Lumina2Transformer2DModel, LuminaNextDiT2DModel, MochiTransformer3DModel, OmniGenTransformer2DModel, PixArtTransformer2DModel, PriorTransformer, QwenImageTransformer2DModel, SanaTransformer2DModel, SD3Transformer2DModel, SkyReelsV2Transformer3DModel, StableAudioDiTModel, T5FilmDecoder, Transformer2DModel, TransformerTemporalModel, WanTransformer3DModel, WanVACETransformer3DModel, ) from .unets import ( I2VGenXLUNet, Kandinsky3UNet, MotionAdapter, StableCascadeUNet, UNet1DModel, UNet2DConditionModel, UNet2DModel, UNet3DConditionModel, UNetMotionModel, UNetSpatioTemporalConditionModel, UVit2DModel, ) if is_flax_available(): from .controlnets import FlaxControlNetModel from .unets import FlaxUNet2DConditionModel from .vae_flax import FlaxAutoencoderKL else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
diffusers/src/diffusers/models/__init__.py/0
{ "file_path": "diffusers/src/diffusers/models/__init__.py", "repo_id": "diffusers", "token_count": 4802 }
160
# Copyright 2025 The Lightricks team and The HuggingFace Team. # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Optional, Tuple, Union import torch import torch.nn as nn from ...configuration_utils import ConfigMixin, register_to_config from ...loaders import FromOriginalModelMixin from ...utils.accelerate_utils import apply_forward_hook from ..activations import get_activation from ..embeddings import PixArtAlphaCombinedTimestepSizeEmbeddings from ..modeling_outputs import AutoencoderKLOutput from ..modeling_utils import ModelMixin from ..normalization import RMSNorm from .vae import DecoderOutput, DiagonalGaussianDistribution class LTXVideoCausalConv3d(nn.Module): def __init__( self, in_channels: int, out_channels: int, kernel_size: Union[int, Tuple[int, int, int]] = 3, stride: Union[int, Tuple[int, int, int]] = 1, dilation: Union[int, Tuple[int, int, int]] = 1, groups: int = 1, padding_mode: str = "zeros", is_causal: bool = True, ): super().__init__() self.in_channels = in_channels self.out_channels = out_channels self.is_causal = is_causal self.kernel_size = kernel_size if isinstance(kernel_size, tuple) else (kernel_size, kernel_size, kernel_size) dilation = dilation if isinstance(dilation, tuple) else (dilation, 1, 1) stride = stride if isinstance(stride, tuple) else (stride, stride, stride) height_pad = self.kernel_size[1] // 2 width_pad = self.kernel_size[2] // 2 padding = (0, height_pad, width_pad) self.conv = nn.Conv3d( in_channels, out_channels, self.kernel_size, stride=stride, dilation=dilation, groups=groups, padding=padding, padding_mode=padding_mode, ) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: time_kernel_size = self.kernel_size[0] if self.is_causal: pad_left = hidden_states[:, :, :1, :, :].repeat((1, 1, time_kernel_size - 1, 1, 1)) hidden_states = torch.concatenate([pad_left, hidden_states], dim=2) else: pad_left = hidden_states[:, :, :1, :, :].repeat((1, 1, (time_kernel_size - 1) // 2, 1, 1)) pad_right = hidden_states[:, :, -1:, :, :].repeat((1, 1, (time_kernel_size - 1) // 2, 1, 1)) hidden_states = torch.concatenate([pad_left, hidden_states, pad_right], dim=2) hidden_states = self.conv(hidden_states) return hidden_states class LTXVideoResnetBlock3d(nn.Module): r""" A 3D ResNet block used in the LTXVideo model. Args: in_channels (`int`): Number of input channels. out_channels (`int`, *optional*): Number of output channels. If None, defaults to `in_channels`. dropout (`float`, defaults to `0.0`): Dropout rate. eps (`float`, defaults to `1e-6`): Epsilon value for normalization layers. elementwise_affine (`bool`, defaults to `False`): Whether to enable elementwise affinity in the normalization layers. non_linearity (`str`, defaults to `"swish"`): Activation function to use. conv_shortcut (bool, defaults to `False`): Whether or not to use a convolution shortcut. 
""" def __init__( self, in_channels: int, out_channels: Optional[int] = None, dropout: float = 0.0, eps: float = 1e-6, elementwise_affine: bool = False, non_linearity: str = "swish", is_causal: bool = True, inject_noise: bool = False, timestep_conditioning: bool = False, ) -> None: super().__init__() out_channels = out_channels or in_channels self.nonlinearity = get_activation(non_linearity) self.norm1 = RMSNorm(in_channels, eps=1e-8, elementwise_affine=elementwise_affine) self.conv1 = LTXVideoCausalConv3d( in_channels=in_channels, out_channels=out_channels, kernel_size=3, is_causal=is_causal ) self.norm2 = RMSNorm(out_channels, eps=1e-8, elementwise_affine=elementwise_affine) self.dropout = nn.Dropout(dropout) self.conv2 = LTXVideoCausalConv3d( in_channels=out_channels, out_channels=out_channels, kernel_size=3, is_causal=is_causal ) self.norm3 = None self.conv_shortcut = None if in_channels != out_channels: self.norm3 = nn.LayerNorm(in_channels, eps=eps, elementwise_affine=True, bias=True) self.conv_shortcut = LTXVideoCausalConv3d( in_channels=in_channels, out_channels=out_channels, kernel_size=1, stride=1, is_causal=is_causal ) self.per_channel_scale1 = None self.per_channel_scale2 = None if inject_noise: self.per_channel_scale1 = nn.Parameter(torch.zeros(in_channels, 1, 1)) self.per_channel_scale2 = nn.Parameter(torch.zeros(in_channels, 1, 1)) self.scale_shift_table = None if timestep_conditioning: self.scale_shift_table = nn.Parameter(torch.randn(4, in_channels) / in_channels**0.5) def forward( self, inputs: torch.Tensor, temb: Optional[torch.Tensor] = None, generator: Optional[torch.Generator] = None ) -> torch.Tensor: hidden_states = inputs hidden_states = self.norm1(hidden_states.movedim(1, -1)).movedim(-1, 1) if self.scale_shift_table is not None: temb = temb.unflatten(1, (4, -1)) + self.scale_shift_table[None, ..., None, None, None] shift_1, scale_1, shift_2, scale_2 = temb.unbind(dim=1) hidden_states = hidden_states * (1 + scale_1) + shift_1 hidden_states = self.nonlinearity(hidden_states) hidden_states = self.conv1(hidden_states) if self.per_channel_scale1 is not None: spatial_shape = hidden_states.shape[-2:] spatial_noise = torch.randn( spatial_shape, generator=generator, device=hidden_states.device, dtype=hidden_states.dtype )[None] hidden_states = hidden_states + (spatial_noise * self.per_channel_scale1)[None, :, None, ...] hidden_states = self.norm2(hidden_states.movedim(1, -1)).movedim(-1, 1) if self.scale_shift_table is not None: hidden_states = hidden_states * (1 + scale_2) + shift_2 hidden_states = self.nonlinearity(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.conv2(hidden_states) if self.per_channel_scale2 is not None: spatial_shape = hidden_states.shape[-2:] spatial_noise = torch.randn( spatial_shape, generator=generator, device=hidden_states.device, dtype=hidden_states.dtype )[None] hidden_states = hidden_states + (spatial_noise * self.per_channel_scale2)[None, :, None, ...] 
if self.norm3 is not None: inputs = self.norm3(inputs.movedim(1, -1)).movedim(-1, 1) if self.conv_shortcut is not None: inputs = self.conv_shortcut(inputs) hidden_states = hidden_states + inputs return hidden_states class LTXVideoDownsampler3d(nn.Module): def __init__( self, in_channels: int, out_channels: int, stride: Union[int, Tuple[int, int, int]] = 1, is_causal: bool = True, padding_mode: str = "zeros", ) -> None: super().__init__() self.stride = stride if isinstance(stride, tuple) else (stride, stride, stride) self.group_size = (in_channels * stride[0] * stride[1] * stride[2]) // out_channels out_channels = out_channels // (self.stride[0] * self.stride[1] * self.stride[2]) self.conv = LTXVideoCausalConv3d( in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=1, is_causal=is_causal, padding_mode=padding_mode, ) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = torch.cat([hidden_states[:, :, : self.stride[0] - 1], hidden_states], dim=2) residual = ( hidden_states.unflatten(4, (-1, self.stride[2])) .unflatten(3, (-1, self.stride[1])) .unflatten(2, (-1, self.stride[0])) ) residual = residual.permute(0, 1, 3, 5, 7, 2, 4, 6).flatten(1, 4) residual = residual.unflatten(1, (-1, self.group_size)) residual = residual.mean(dim=2) hidden_states = self.conv(hidden_states) hidden_states = ( hidden_states.unflatten(4, (-1, self.stride[2])) .unflatten(3, (-1, self.stride[1])) .unflatten(2, (-1, self.stride[0])) ) hidden_states = hidden_states.permute(0, 1, 3, 5, 7, 2, 4, 6).flatten(1, 4) hidden_states = hidden_states + residual return hidden_states class LTXVideoUpsampler3d(nn.Module): def __init__( self, in_channels: int, stride: Union[int, Tuple[int, int, int]] = 1, is_causal: bool = True, residual: bool = False, upscale_factor: int = 1, padding_mode: str = "zeros", ) -> None: super().__init__() self.stride = stride if isinstance(stride, tuple) else (stride, stride, stride) self.residual = residual self.upscale_factor = upscale_factor out_channels = (in_channels * stride[0] * stride[1] * stride[2]) // upscale_factor self.conv = LTXVideoCausalConv3d( in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=1, is_causal=is_causal, padding_mode=padding_mode, ) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: batch_size, num_channels, num_frames, height, width = hidden_states.shape if self.residual: residual = hidden_states.reshape( batch_size, -1, self.stride[0], self.stride[1], self.stride[2], num_frames, height, width ) residual = residual.permute(0, 1, 5, 2, 6, 3, 7, 4).flatten(6, 7).flatten(4, 5).flatten(2, 3) repeats = (self.stride[0] * self.stride[1] * self.stride[2]) // self.upscale_factor residual = residual.repeat(1, repeats, 1, 1, 1) residual = residual[:, :, self.stride[0] - 1 :] hidden_states = self.conv(hidden_states) hidden_states = hidden_states.reshape( batch_size, -1, self.stride[0], self.stride[1], self.stride[2], num_frames, height, width ) hidden_states = hidden_states.permute(0, 1, 5, 2, 6, 3, 7, 4).flatten(6, 7).flatten(4, 5).flatten(2, 3) hidden_states = hidden_states[:, :, self.stride[0] - 1 :] if self.residual: hidden_states = hidden_states + residual return hidden_states class LTXVideoDownBlock3D(nn.Module): r""" Down block used in the LTXVideo model. Args: in_channels (`int`): Number of input channels. out_channels (`int`, *optional*): Number of output channels. If None, defaults to `in_channels`. num_layers (`int`, defaults to `1`): Number of resnet layers. 
dropout (`float`, defaults to `0.0`): Dropout rate. resnet_eps (`float`, defaults to `1e-6`): Epsilon value for normalization layers. resnet_act_fn (`str`, defaults to `"swish"`): Activation function to use. spatio_temporal_scale (`bool`, defaults to `True`): Whether or not to use a downsampling layer. If not used, output dimension would be same as input dimension. Whether or not to downsample across temporal dimension. is_causal (`bool`, defaults to `True`): Whether this layer behaves causally (future frames depend only on past frames) or not. """ _supports_gradient_checkpointing = True def __init__( self, in_channels: int, out_channels: Optional[int] = None, num_layers: int = 1, dropout: float = 0.0, resnet_eps: float = 1e-6, resnet_act_fn: str = "swish", spatio_temporal_scale: bool = True, is_causal: bool = True, ): super().__init__() out_channels = out_channels or in_channels resnets = [] for _ in range(num_layers): resnets.append( LTXVideoResnetBlock3d( in_channels=in_channels, out_channels=in_channels, dropout=dropout, eps=resnet_eps, non_linearity=resnet_act_fn, is_causal=is_causal, ) ) self.resnets = nn.ModuleList(resnets) self.downsamplers = None if spatio_temporal_scale: self.downsamplers = nn.ModuleList( [ LTXVideoCausalConv3d( in_channels=in_channels, out_channels=in_channels, kernel_size=3, stride=(2, 2, 2), is_causal=is_causal, ) ] ) self.conv_out = None if in_channels != out_channels: self.conv_out = LTXVideoResnetBlock3d( in_channels=in_channels, out_channels=out_channels, dropout=dropout, eps=resnet_eps, non_linearity=resnet_act_fn, is_causal=is_causal, ) self.gradient_checkpointing = False def forward( self, hidden_states: torch.Tensor, temb: Optional[torch.Tensor] = None, generator: Optional[torch.Generator] = None, ) -> torch.Tensor: r"""Forward method of the `LTXDownBlock3D` class.""" for i, resnet in enumerate(self.resnets): if torch.is_grad_enabled() and self.gradient_checkpointing: hidden_states = self._gradient_checkpointing_func(resnet, hidden_states, temb, generator) else: hidden_states = resnet(hidden_states, temb, generator) if self.downsamplers is not None: for downsampler in self.downsamplers: hidden_states = downsampler(hidden_states) if self.conv_out is not None: hidden_states = self.conv_out(hidden_states, temb, generator) return hidden_states class LTXVideo095DownBlock3D(nn.Module): r""" Down block used in the LTXVideo model. Args: in_channels (`int`): Number of input channels. out_channels (`int`, *optional*): Number of output channels. If None, defaults to `in_channels`. num_layers (`int`, defaults to `1`): Number of resnet layers. dropout (`float`, defaults to `0.0`): Dropout rate. resnet_eps (`float`, defaults to `1e-6`): Epsilon value for normalization layers. resnet_act_fn (`str`, defaults to `"swish"`): Activation function to use. spatio_temporal_scale (`bool`, defaults to `True`): Whether or not to use a downsampling layer. If not used, output dimension would be same as input dimension. Whether or not to downsample across temporal dimension. is_causal (`bool`, defaults to `True`): Whether this layer behaves causally (future frames depend only on past frames) or not. 
""" _supports_gradient_checkpointing = True def __init__( self, in_channels: int, out_channels: Optional[int] = None, num_layers: int = 1, dropout: float = 0.0, resnet_eps: float = 1e-6, resnet_act_fn: str = "swish", spatio_temporal_scale: bool = True, is_causal: bool = True, downsample_type: str = "conv", ): super().__init__() out_channels = out_channels or in_channels resnets = [] for _ in range(num_layers): resnets.append( LTXVideoResnetBlock3d( in_channels=in_channels, out_channels=in_channels, dropout=dropout, eps=resnet_eps, non_linearity=resnet_act_fn, is_causal=is_causal, ) ) self.resnets = nn.ModuleList(resnets) self.downsamplers = None if spatio_temporal_scale: self.downsamplers = nn.ModuleList() if downsample_type == "conv": self.downsamplers.append( LTXVideoCausalConv3d( in_channels=in_channels, out_channels=in_channels, kernel_size=3, stride=(2, 2, 2), is_causal=is_causal, ) ) elif downsample_type == "spatial": self.downsamplers.append( LTXVideoDownsampler3d( in_channels=in_channels, out_channels=out_channels, stride=(1, 2, 2), is_causal=is_causal ) ) elif downsample_type == "temporal": self.downsamplers.append( LTXVideoDownsampler3d( in_channels=in_channels, out_channels=out_channels, stride=(2, 1, 1), is_causal=is_causal ) ) elif downsample_type == "spatiotemporal": self.downsamplers.append( LTXVideoDownsampler3d( in_channels=in_channels, out_channels=out_channels, stride=(2, 2, 2), is_causal=is_causal ) ) self.gradient_checkpointing = False def forward( self, hidden_states: torch.Tensor, temb: Optional[torch.Tensor] = None, generator: Optional[torch.Generator] = None, ) -> torch.Tensor: r"""Forward method of the `LTXDownBlock3D` class.""" for i, resnet in enumerate(self.resnets): if torch.is_grad_enabled() and self.gradient_checkpointing: hidden_states = self._gradient_checkpointing_func(resnet, hidden_states, temb, generator) else: hidden_states = resnet(hidden_states, temb, generator) if self.downsamplers is not None: for downsampler in self.downsamplers: hidden_states = downsampler(hidden_states) return hidden_states # Adapted from diffusers.models.autoencoders.autoencoder_kl_cogvideox.CogVideoMidBlock3d class LTXVideoMidBlock3d(nn.Module): r""" A middle block used in the LTXVideo model. Args: in_channels (`int`): Number of input channels. num_layers (`int`, defaults to `1`): Number of resnet layers. dropout (`float`, defaults to `0.0`): Dropout rate. resnet_eps (`float`, defaults to `1e-6`): Epsilon value for normalization layers. resnet_act_fn (`str`, defaults to `"swish"`): Activation function to use. is_causal (`bool`, defaults to `True`): Whether this layer behaves causally (future frames depend only on past frames) or not. 
""" _supports_gradient_checkpointing = True def __init__( self, in_channels: int, num_layers: int = 1, dropout: float = 0.0, resnet_eps: float = 1e-6, resnet_act_fn: str = "swish", is_causal: bool = True, inject_noise: bool = False, timestep_conditioning: bool = False, ) -> None: super().__init__() self.time_embedder = None if timestep_conditioning: self.time_embedder = PixArtAlphaCombinedTimestepSizeEmbeddings(in_channels * 4, 0) resnets = [] for _ in range(num_layers): resnets.append( LTXVideoResnetBlock3d( in_channels=in_channels, out_channels=in_channels, dropout=dropout, eps=resnet_eps, non_linearity=resnet_act_fn, is_causal=is_causal, inject_noise=inject_noise, timestep_conditioning=timestep_conditioning, ) ) self.resnets = nn.ModuleList(resnets) self.gradient_checkpointing = False def forward( self, hidden_states: torch.Tensor, temb: Optional[torch.Tensor] = None, generator: Optional[torch.Generator] = None, ) -> torch.Tensor: r"""Forward method of the `LTXMidBlock3D` class.""" if self.time_embedder is not None: temb = self.time_embedder( timestep=temb.flatten(), resolution=None, aspect_ratio=None, batch_size=hidden_states.size(0), hidden_dtype=hidden_states.dtype, ) temb = temb.view(hidden_states.size(0), -1, 1, 1, 1) for i, resnet in enumerate(self.resnets): if torch.is_grad_enabled() and self.gradient_checkpointing: hidden_states = self._gradient_checkpointing_func(resnet, hidden_states, temb, generator) else: hidden_states = resnet(hidden_states, temb, generator) return hidden_states class LTXVideoUpBlock3d(nn.Module): r""" Up block used in the LTXVideo model. Args: in_channels (`int`): Number of input channels. out_channels (`int`, *optional*): Number of output channels. If None, defaults to `in_channels`. num_layers (`int`, defaults to `1`): Number of resnet layers. dropout (`float`, defaults to `0.0`): Dropout rate. resnet_eps (`float`, defaults to `1e-6`): Epsilon value for normalization layers. resnet_act_fn (`str`, defaults to `"swish"`): Activation function to use. spatio_temporal_scale (`bool`, defaults to `True`): Whether or not to use a downsampling layer. If not used, output dimension would be same as input dimension. Whether or not to downsample across temporal dimension. is_causal (`bool`, defaults to `True`): Whether this layer behaves causally (future frames depend only on past frames) or not. 
""" _supports_gradient_checkpointing = True def __init__( self, in_channels: int, out_channels: Optional[int] = None, num_layers: int = 1, dropout: float = 0.0, resnet_eps: float = 1e-6, resnet_act_fn: str = "swish", spatio_temporal_scale: bool = True, is_causal: bool = True, inject_noise: bool = False, timestep_conditioning: bool = False, upsample_residual: bool = False, upscale_factor: int = 1, ): super().__init__() out_channels = out_channels or in_channels self.time_embedder = None if timestep_conditioning: self.time_embedder = PixArtAlphaCombinedTimestepSizeEmbeddings(in_channels * 4, 0) self.conv_in = None if in_channels != out_channels: self.conv_in = LTXVideoResnetBlock3d( in_channels=in_channels, out_channels=out_channels, dropout=dropout, eps=resnet_eps, non_linearity=resnet_act_fn, is_causal=is_causal, inject_noise=inject_noise, timestep_conditioning=timestep_conditioning, ) self.upsamplers = None if spatio_temporal_scale: self.upsamplers = nn.ModuleList( [ LTXVideoUpsampler3d( out_channels * upscale_factor, stride=(2, 2, 2), is_causal=is_causal, residual=upsample_residual, upscale_factor=upscale_factor, ) ] ) resnets = [] for _ in range(num_layers): resnets.append( LTXVideoResnetBlock3d( in_channels=out_channels, out_channels=out_channels, dropout=dropout, eps=resnet_eps, non_linearity=resnet_act_fn, is_causal=is_causal, inject_noise=inject_noise, timestep_conditioning=timestep_conditioning, ) ) self.resnets = nn.ModuleList(resnets) self.gradient_checkpointing = False def forward( self, hidden_states: torch.Tensor, temb: Optional[torch.Tensor] = None, generator: Optional[torch.Generator] = None, ) -> torch.Tensor: if self.conv_in is not None: hidden_states = self.conv_in(hidden_states, temb, generator) if self.time_embedder is not None: temb = self.time_embedder( timestep=temb.flatten(), resolution=None, aspect_ratio=None, batch_size=hidden_states.size(0), hidden_dtype=hidden_states.dtype, ) temb = temb.view(hidden_states.size(0), -1, 1, 1, 1) if self.upsamplers is not None: for upsampler in self.upsamplers: hidden_states = upsampler(hidden_states) for i, resnet in enumerate(self.resnets): if torch.is_grad_enabled() and self.gradient_checkpointing: hidden_states = self._gradient_checkpointing_func(resnet, hidden_states, temb, generator) else: hidden_states = resnet(hidden_states, temb, generator) return hidden_states class LTXVideoEncoder3d(nn.Module): r""" The `LTXVideoEncoder3d` layer of a variational autoencoder that encodes input video samples to its latent representation. Args: in_channels (`int`, defaults to 3): Number of input channels. out_channels (`int`, defaults to 128): Number of latent channels. block_out_channels (`Tuple[int, ...]`, defaults to `(128, 256, 512, 512)`): The number of output channels for each block. spatio_temporal_scaling (`Tuple[bool, ...], defaults to `(True, True, True, False)`: Whether a block should contain spatio-temporal downscaling layers or not. layers_per_block (`Tuple[int, ...]`, defaults to `(4, 3, 3, 3, 4)`): The number of layers per block. patch_size (`int`, defaults to `4`): The size of spatial patches. patch_size_t (`int`, defaults to `1`): The size of temporal patches. resnet_norm_eps (`float`, defaults to `1e-6`): Epsilon value for ResNet normalization layers. is_causal (`bool`, defaults to `True`): Whether this layer behaves causally (future frames depend only on past frames) or not. """ def __init__( self, in_channels: int = 3, out_channels: int = 128, block_out_channels: Tuple[int, ...] 
= (128, 256, 512, 512), down_block_types: Tuple[str, ...] = ( "LTXVideoDownBlock3D", "LTXVideoDownBlock3D", "LTXVideoDownBlock3D", "LTXVideoDownBlock3D", ), spatio_temporal_scaling: Tuple[bool, ...] = (True, True, True, False), layers_per_block: Tuple[int, ...] = (4, 3, 3, 3, 4), downsample_type: Tuple[str, ...] = ("conv", "conv", "conv", "conv"), patch_size: int = 4, patch_size_t: int = 1, resnet_norm_eps: float = 1e-6, is_causal: bool = True, ): super().__init__() self.patch_size = patch_size self.patch_size_t = patch_size_t self.in_channels = in_channels * patch_size**2 output_channel = block_out_channels[0] self.conv_in = LTXVideoCausalConv3d( in_channels=self.in_channels, out_channels=output_channel, kernel_size=3, stride=1, is_causal=is_causal, ) # down blocks is_ltx_095 = down_block_types[-1] == "LTXVideo095DownBlock3D" num_block_out_channels = len(block_out_channels) - (1 if is_ltx_095 else 0) self.down_blocks = nn.ModuleList([]) for i in range(num_block_out_channels): input_channel = output_channel if not is_ltx_095: output_channel = block_out_channels[i + 1] if i + 1 < num_block_out_channels else block_out_channels[i] else: output_channel = block_out_channels[i + 1] if down_block_types[i] == "LTXVideoDownBlock3D": down_block = LTXVideoDownBlock3D( in_channels=input_channel, out_channels=output_channel, num_layers=layers_per_block[i], resnet_eps=resnet_norm_eps, spatio_temporal_scale=spatio_temporal_scaling[i], is_causal=is_causal, ) elif down_block_types[i] == "LTXVideo095DownBlock3D": down_block = LTXVideo095DownBlock3D( in_channels=input_channel, out_channels=output_channel, num_layers=layers_per_block[i], resnet_eps=resnet_norm_eps, spatio_temporal_scale=spatio_temporal_scaling[i], is_causal=is_causal, downsample_type=downsample_type[i], ) else: raise ValueError(f"Unknown down block type: {down_block_types[i]}") self.down_blocks.append(down_block) # mid block self.mid_block = LTXVideoMidBlock3d( in_channels=output_channel, num_layers=layers_per_block[-1], resnet_eps=resnet_norm_eps, is_causal=is_causal, ) # out self.norm_out = RMSNorm(out_channels, eps=1e-8, elementwise_affine=False) self.conv_act = nn.SiLU() self.conv_out = LTXVideoCausalConv3d( in_channels=output_channel, out_channels=out_channels + 1, kernel_size=3, stride=1, is_causal=is_causal ) self.gradient_checkpointing = False def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: r"""The forward method of the `LTXVideoEncoder3d` class.""" p = self.patch_size p_t = self.patch_size_t batch_size, num_channels, num_frames, height, width = hidden_states.shape post_patch_num_frames = num_frames // p_t post_patch_height = height // p post_patch_width = width // p hidden_states = hidden_states.reshape( batch_size, num_channels, post_patch_num_frames, p_t, post_patch_height, p, post_patch_width, p ) # Thanks for driving me insane with the weird patching order :( hidden_states = hidden_states.permute(0, 1, 3, 7, 5, 2, 4, 6).flatten(1, 4) hidden_states = self.conv_in(hidden_states) if torch.is_grad_enabled() and self.gradient_checkpointing: for down_block in self.down_blocks: hidden_states = self._gradient_checkpointing_func(down_block, hidden_states) hidden_states = self._gradient_checkpointing_func(self.mid_block, hidden_states) else: for down_block in self.down_blocks: hidden_states = down_block(hidden_states) hidden_states = self.mid_block(hidden_states) hidden_states = self.norm_out(hidden_states.movedim(1, -1)).movedim(-1, 1) hidden_states = self.conv_act(hidden_states) hidden_states = 
self.conv_out(hidden_states) last_channel = hidden_states[:, -1:] last_channel = last_channel.repeat(1, hidden_states.size(1) - 2, 1, 1, 1) hidden_states = torch.cat([hidden_states, last_channel], dim=1) return hidden_states class LTXVideoDecoder3d(nn.Module): r""" The `LTXVideoDecoder3d` layer of a variational autoencoder that decodes its latent representation into an output sample. Args: in_channels (`int`, defaults to 128): Number of latent channels. out_channels (`int`, defaults to 3): Number of output channels. block_out_channels (`Tuple[int, ...]`, defaults to `(128, 256, 512, 512)`): The number of output channels for each block. spatio_temporal_scaling (`Tuple[bool, ...], defaults to `(True, True, True, False)`: Whether a block should contain spatio-temporal upscaling layers or not. layers_per_block (`Tuple[int, ...]`, defaults to `(4, 3, 3, 3, 4)`): The number of layers per block. patch_size (`int`, defaults to `4`): The size of spatial patches. patch_size_t (`int`, defaults to `1`): The size of temporal patches. resnet_norm_eps (`float`, defaults to `1e-6`): Epsilon value for ResNet normalization layers. is_causal (`bool`, defaults to `False`): Whether this layer behaves causally (future frames depend only on past frames) or not. timestep_conditioning (`bool`, defaults to `False`): Whether to condition the model on timesteps. """ def __init__( self, in_channels: int = 128, out_channels: int = 3, block_out_channels: Tuple[int, ...] = (128, 256, 512, 512), spatio_temporal_scaling: Tuple[bool, ...] = (True, True, True, False), layers_per_block: Tuple[int, ...] = (4, 3, 3, 3, 4), patch_size: int = 4, patch_size_t: int = 1, resnet_norm_eps: float = 1e-6, is_causal: bool = False, inject_noise: Tuple[bool, ...] = (False, False, False, False), timestep_conditioning: bool = False, upsample_residual: Tuple[bool, ...] = (False, False, False, False), upsample_factor: Tuple[bool, ...] 
= (1, 1, 1, 1), ) -> None: super().__init__() self.patch_size = patch_size self.patch_size_t = patch_size_t self.out_channels = out_channels * patch_size**2 block_out_channels = tuple(reversed(block_out_channels)) spatio_temporal_scaling = tuple(reversed(spatio_temporal_scaling)) layers_per_block = tuple(reversed(layers_per_block)) inject_noise = tuple(reversed(inject_noise)) upsample_residual = tuple(reversed(upsample_residual)) upsample_factor = tuple(reversed(upsample_factor)) output_channel = block_out_channels[0] self.conv_in = LTXVideoCausalConv3d( in_channels=in_channels, out_channels=output_channel, kernel_size=3, stride=1, is_causal=is_causal ) self.mid_block = LTXVideoMidBlock3d( in_channels=output_channel, num_layers=layers_per_block[0], resnet_eps=resnet_norm_eps, is_causal=is_causal, inject_noise=inject_noise[0], timestep_conditioning=timestep_conditioning, ) # up blocks num_block_out_channels = len(block_out_channels) self.up_blocks = nn.ModuleList([]) for i in range(num_block_out_channels): input_channel = output_channel // upsample_factor[i] output_channel = block_out_channels[i] // upsample_factor[i] up_block = LTXVideoUpBlock3d( in_channels=input_channel, out_channels=output_channel, num_layers=layers_per_block[i + 1], resnet_eps=resnet_norm_eps, spatio_temporal_scale=spatio_temporal_scaling[i], is_causal=is_causal, inject_noise=inject_noise[i + 1], timestep_conditioning=timestep_conditioning, upsample_residual=upsample_residual[i], upscale_factor=upsample_factor[i], ) self.up_blocks.append(up_block) # out self.norm_out = RMSNorm(out_channels, eps=1e-8, elementwise_affine=False) self.conv_act = nn.SiLU() self.conv_out = LTXVideoCausalConv3d( in_channels=output_channel, out_channels=self.out_channels, kernel_size=3, stride=1, is_causal=is_causal ) # timestep embedding self.time_embedder = None self.scale_shift_table = None self.timestep_scale_multiplier = None if timestep_conditioning: self.timestep_scale_multiplier = nn.Parameter(torch.tensor(1000.0, dtype=torch.float32)) self.time_embedder = PixArtAlphaCombinedTimestepSizeEmbeddings(output_channel * 2, 0) self.scale_shift_table = nn.Parameter(torch.randn(2, output_channel) / output_channel**0.5) self.gradient_checkpointing = False def forward(self, hidden_states: torch.Tensor, temb: Optional[torch.Tensor] = None) -> torch.Tensor: hidden_states = self.conv_in(hidden_states) if self.timestep_scale_multiplier is not None: temb = temb * self.timestep_scale_multiplier if torch.is_grad_enabled() and self.gradient_checkpointing: hidden_states = self._gradient_checkpointing_func(self.mid_block, hidden_states, temb) for up_block in self.up_blocks: hidden_states = self._gradient_checkpointing_func(up_block, hidden_states, temb) else: hidden_states = self.mid_block(hidden_states, temb) for up_block in self.up_blocks: hidden_states = up_block(hidden_states, temb) hidden_states = self.norm_out(hidden_states.movedim(1, -1)).movedim(-1, 1) if self.time_embedder is not None: temb = self.time_embedder( timestep=temb.flatten(), resolution=None, aspect_ratio=None, batch_size=hidden_states.size(0), hidden_dtype=hidden_states.dtype, ) temb = temb.view(hidden_states.size(0), -1, 1, 1, 1).unflatten(1, (2, -1)) temb = temb + self.scale_shift_table[None, ..., None, None, None] shift, scale = temb.unbind(dim=1) hidden_states = hidden_states * (1 + scale) + shift hidden_states = self.conv_act(hidden_states) hidden_states = self.conv_out(hidden_states) p = self.patch_size p_t = self.patch_size_t batch_size, num_channels, num_frames, height, width = 
hidden_states.shape hidden_states = hidden_states.reshape(batch_size, -1, p_t, p, p, num_frames, height, width) hidden_states = hidden_states.permute(0, 1, 5, 2, 6, 4, 7, 3).flatten(6, 7).flatten(4, 5).flatten(2, 3) return hidden_states class AutoencoderKLLTXVideo(ModelMixin, ConfigMixin, FromOriginalModelMixin): r""" A VAE model with KL loss for encoding images into latents and decoding latent representations into images. Used in [LTX](https://huggingface.co/Lightricks/LTX-Video). This model inherits from [`ModelMixin`]. Check the superclass documentation for it's generic methods implemented for all models (such as downloading or saving). Args: in_channels (`int`, defaults to `3`): Number of input channels. out_channels (`int`, defaults to `3`): Number of output channels. latent_channels (`int`, defaults to `128`): Number of latent channels. block_out_channels (`Tuple[int, ...]`, defaults to `(128, 256, 512, 512)`): The number of output channels for each block. spatio_temporal_scaling (`Tuple[bool, ...], defaults to `(True, True, True, False)`: Whether a block should contain spatio-temporal downscaling or not. layers_per_block (`Tuple[int, ...]`, defaults to `(4, 3, 3, 3, 4)`): The number of layers per block. patch_size (`int`, defaults to `4`): The size of spatial patches. patch_size_t (`int`, defaults to `1`): The size of temporal patches. resnet_norm_eps (`float`, defaults to `1e-6`): Epsilon value for ResNet normalization layers. scaling_factor (`float`, *optional*, defaults to `1.0`): The component-wise standard deviation of the trained latent space computed using the first batch of the training set. This is used to scale the latent space to have unit variance when training the diffusion model. The latents are scaled with the formula `z = z * scaling_factor` before being passed to the diffusion model. When decoding, the latents are scaled back to the original scale with the formula: `z = 1 / scaling_factor * z`. For more details, refer to sections 4.3.2 and D.1 of the [High-Resolution Image Synthesis with Latent Diffusion Models](https://huggingface.co/papers/2112.10752) paper. encoder_causal (`bool`, defaults to `True`): Whether the encoder should behave causally (future frames depend only on past frames) or not. decoder_causal (`bool`, defaults to `False`): Whether the decoder should behave causally (future frames depend only on past frames) or not. """ _supports_gradient_checkpointing = True @register_to_config def __init__( self, in_channels: int = 3, out_channels: int = 3, latent_channels: int = 128, block_out_channels: Tuple[int, ...] = (128, 256, 512, 512), down_block_types: Tuple[str, ...] = ( "LTXVideoDownBlock3D", "LTXVideoDownBlock3D", "LTXVideoDownBlock3D", "LTXVideoDownBlock3D", ), decoder_block_out_channels: Tuple[int, ...] = (128, 256, 512, 512), layers_per_block: Tuple[int, ...] = (4, 3, 3, 3, 4), decoder_layers_per_block: Tuple[int, ...] = (4, 3, 3, 3, 4), spatio_temporal_scaling: Tuple[bool, ...] = (True, True, True, False), decoder_spatio_temporal_scaling: Tuple[bool, ...] = (True, True, True, False), decoder_inject_noise: Tuple[bool, ...] = (False, False, False, False, False), downsample_type: Tuple[str, ...] = ("conv", "conv", "conv", "conv"), upsample_residual: Tuple[bool, ...] = (False, False, False, False), upsample_factor: Tuple[int, ...] 
= (1, 1, 1, 1), timestep_conditioning: bool = False, patch_size: int = 4, patch_size_t: int = 1, resnet_norm_eps: float = 1e-6, scaling_factor: float = 1.0, encoder_causal: bool = True, decoder_causal: bool = False, spatial_compression_ratio: int = None, temporal_compression_ratio: int = None, ) -> None: super().__init__() self.encoder = LTXVideoEncoder3d( in_channels=in_channels, out_channels=latent_channels, block_out_channels=block_out_channels, down_block_types=down_block_types, spatio_temporal_scaling=spatio_temporal_scaling, layers_per_block=layers_per_block, downsample_type=downsample_type, patch_size=patch_size, patch_size_t=patch_size_t, resnet_norm_eps=resnet_norm_eps, is_causal=encoder_causal, ) self.decoder = LTXVideoDecoder3d( in_channels=latent_channels, out_channels=out_channels, block_out_channels=decoder_block_out_channels, spatio_temporal_scaling=decoder_spatio_temporal_scaling, layers_per_block=decoder_layers_per_block, patch_size=patch_size, patch_size_t=patch_size_t, resnet_norm_eps=resnet_norm_eps, is_causal=decoder_causal, timestep_conditioning=timestep_conditioning, inject_noise=decoder_inject_noise, upsample_residual=upsample_residual, upsample_factor=upsample_factor, ) latents_mean = torch.zeros((latent_channels,), requires_grad=False) latents_std = torch.ones((latent_channels,), requires_grad=False) self.register_buffer("latents_mean", latents_mean, persistent=True) self.register_buffer("latents_std", latents_std, persistent=True) self.spatial_compression_ratio = ( patch_size * 2 ** sum(spatio_temporal_scaling) if spatial_compression_ratio is None else spatial_compression_ratio ) self.temporal_compression_ratio = ( patch_size_t * 2 ** sum(spatio_temporal_scaling) if temporal_compression_ratio is None else temporal_compression_ratio ) # When decoding a batch of video latents at a time, one can save memory by slicing across the batch dimension # to perform decoding of a single video latent at a time. self.use_slicing = False # When decoding spatially large video latents, the memory requirement is very high. By breaking the video latent # frames spatially into smaller tiles and performing multiple forward passes for decoding, and then blending the # intermediate tiles together, the memory requirement can be lowered. self.use_tiling = False # When decoding temporally long video latents, the memory requirement is very high. By decoding latent frames # at a fixed frame batch size (based on `self.num_latent_frames_batch_sizes`), the memory requirement can be lowered. self.use_framewise_encoding = False self.use_framewise_decoding = False # This can be configured based on the amount of GPU memory available. # `16` for sample frames and `2` for latent frames are sensible defaults for consumer GPUs. # Setting it to higher values results in higher memory usage. 
self.num_sample_frames_batch_size = 16 self.num_latent_frames_batch_size = 2 # The minimal tile height and width for spatial tiling to be used self.tile_sample_min_height = 512 self.tile_sample_min_width = 512 self.tile_sample_min_num_frames = 16 # The minimal distance between two spatial tiles self.tile_sample_stride_height = 448 self.tile_sample_stride_width = 448 self.tile_sample_stride_num_frames = 8 def enable_tiling( self, tile_sample_min_height: Optional[int] = None, tile_sample_min_width: Optional[int] = None, tile_sample_min_num_frames: Optional[int] = None, tile_sample_stride_height: Optional[float] = None, tile_sample_stride_width: Optional[float] = None, tile_sample_stride_num_frames: Optional[float] = None, ) -> None: r""" Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow processing larger images. Args: tile_sample_min_height (`int`, *optional*): The minimum height required for a sample to be separated into tiles across the height dimension. tile_sample_min_width (`int`, *optional*): The minimum width required for a sample to be separated into tiles across the width dimension. tile_sample_stride_height (`int`, *optional*): The minimum amount of overlap between two consecutive vertical tiles. This is to ensure that there are no tiling artifacts produced across the height dimension. tile_sample_stride_width (`int`, *optional*): The stride between two consecutive horizontal tiles. This is to ensure that there are no tiling artifacts produced across the width dimension. """ self.use_tiling = True self.tile_sample_min_height = tile_sample_min_height or self.tile_sample_min_height self.tile_sample_min_width = tile_sample_min_width or self.tile_sample_min_width self.tile_sample_min_num_frames = tile_sample_min_num_frames or self.tile_sample_min_num_frames self.tile_sample_stride_height = tile_sample_stride_height or self.tile_sample_stride_height self.tile_sample_stride_width = tile_sample_stride_width or self.tile_sample_stride_width self.tile_sample_stride_num_frames = tile_sample_stride_num_frames or self.tile_sample_stride_num_frames def disable_tiling(self) -> None: r""" Disable tiled VAE decoding. If `enable_tiling` was previously enabled, this method will go back to computing decoding in one step. """ self.use_tiling = False def enable_slicing(self) -> None: r""" Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several steps. This is useful to save some memory and allow larger batch sizes. """ self.use_slicing = True def disable_slicing(self) -> None: r""" Disable sliced VAE decoding. If `enable_slicing` was previously enabled, this method will go back to computing decoding in one step. """ self.use_slicing = False def _encode(self, x: torch.Tensor) -> torch.Tensor: batch_size, num_channels, num_frames, height, width = x.shape if self.use_framewise_decoding and num_frames > self.tile_sample_min_num_frames: return self._temporal_tiled_encode(x) if self.use_tiling and (width > self.tile_sample_min_width or height > self.tile_sample_min_height): return self.tiled_encode(x) enc = self.encoder(x) return enc @apply_forward_hook def encode( self, x: torch.Tensor, return_dict: bool = True ) -> Union[AutoencoderKLOutput, Tuple[DiagonalGaussianDistribution]]: """ Encode a batch of images into latents. 
Args: x (`torch.Tensor`): Input batch of images. return_dict (`bool`, *optional*, defaults to `True`): Whether to return a [`~models.autoencoder_kl.AutoencoderKLOutput`] instead of a plain tuple. Returns: The latent representations of the encoded videos. If `return_dict` is True, a [`~models.autoencoder_kl.AutoencoderKLOutput`] is returned, otherwise a plain `tuple` is returned. """ if self.use_slicing and x.shape[0] > 1: encoded_slices = [self._encode(x_slice) for x_slice in x.split(1)] h = torch.cat(encoded_slices) else: h = self._encode(x) posterior = DiagonalGaussianDistribution(h) if not return_dict: return (posterior,) return AutoencoderKLOutput(latent_dist=posterior) def _decode( self, z: torch.Tensor, temb: Optional[torch.Tensor] = None, return_dict: bool = True ) -> Union[DecoderOutput, torch.Tensor]: batch_size, num_channels, num_frames, height, width = z.shape tile_latent_min_height = self.tile_sample_min_height // self.spatial_compression_ratio tile_latent_min_width = self.tile_sample_min_width // self.spatial_compression_ratio tile_latent_min_num_frames = self.tile_sample_min_num_frames // self.temporal_compression_ratio if self.use_framewise_decoding and num_frames > tile_latent_min_num_frames: return self._temporal_tiled_decode(z, temb, return_dict=return_dict) if self.use_tiling and (width > tile_latent_min_width or height > tile_latent_min_height): return self.tiled_decode(z, temb, return_dict=return_dict) dec = self.decoder(z, temb) if not return_dict: return (dec,) return DecoderOutput(sample=dec) @apply_forward_hook def decode( self, z: torch.Tensor, temb: Optional[torch.Tensor] = None, return_dict: bool = True ) -> Union[DecoderOutput, torch.Tensor]: """ Decode a batch of images. Args: z (`torch.Tensor`): Input batch of latent vectors. return_dict (`bool`, *optional*, defaults to `True`): Whether to return a [`~models.vae.DecoderOutput`] instead of a plain tuple. Returns: [`~models.vae.DecoderOutput`] or `tuple`: If return_dict is True, a [`~models.vae.DecoderOutput`] is returned, otherwise a plain `tuple` is returned. """ if self.use_slicing and z.shape[0] > 1: if temb is not None: decoded_slices = [ self._decode(z_slice, t_slice).sample for z_slice, t_slice in (z.split(1), temb.split(1)) ] else: decoded_slices = [self._decode(z_slice).sample for z_slice in z.split(1)] decoded = torch.cat(decoded_slices) else: decoded = self._decode(z, temb).sample if not return_dict: return (decoded,) return DecoderOutput(sample=decoded) def blend_v(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor: blend_extent = min(a.shape[3], b.shape[3], blend_extent) for y in range(blend_extent): b[:, :, :, y, :] = a[:, :, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, :, y, :] * ( y / blend_extent ) return b def blend_h(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor: blend_extent = min(a.shape[4], b.shape[4], blend_extent) for x in range(blend_extent): b[:, :, :, :, x] = a[:, :, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, :, x] * ( x / blend_extent ) return b def blend_t(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor: blend_extent = min(a.shape[-3], b.shape[-3], blend_extent) for x in range(blend_extent): b[:, :, x, :, :] = a[:, :, -blend_extent + x, :, :] * (1 - x / blend_extent) + b[:, :, x, :, :] * ( x / blend_extent ) return b def tiled_encode(self, x: torch.Tensor) -> torch.Tensor: r"""Encode a batch of images using a tiled encoder. 
Args: x (`torch.Tensor`): Input batch of videos. Returns: `torch.Tensor`: The latent representation of the encoded videos. """ batch_size, num_channels, num_frames, height, width = x.shape latent_height = height // self.spatial_compression_ratio latent_width = width // self.spatial_compression_ratio tile_latent_min_height = self.tile_sample_min_height // self.spatial_compression_ratio tile_latent_min_width = self.tile_sample_min_width // self.spatial_compression_ratio tile_latent_stride_height = self.tile_sample_stride_height // self.spatial_compression_ratio tile_latent_stride_width = self.tile_sample_stride_width // self.spatial_compression_ratio blend_height = tile_latent_min_height - tile_latent_stride_height blend_width = tile_latent_min_width - tile_latent_stride_width # Split x into overlapping tiles and encode them separately. # The tiles have an overlap to avoid seams between tiles. rows = [] for i in range(0, height, self.tile_sample_stride_height): row = [] for j in range(0, width, self.tile_sample_stride_width): time = self.encoder( x[:, :, :, i : i + self.tile_sample_min_height, j : j + self.tile_sample_min_width] ) row.append(time) rows.append(row) result_rows = [] for i, row in enumerate(rows): result_row = [] for j, tile in enumerate(row): # blend the above tile and the left tile # to the current tile and add the current tile to the result row if i > 0: tile = self.blend_v(rows[i - 1][j], tile, blend_height) if j > 0: tile = self.blend_h(row[j - 1], tile, blend_width) result_row.append(tile[:, :, :, :tile_latent_stride_height, :tile_latent_stride_width]) result_rows.append(torch.cat(result_row, dim=4)) enc = torch.cat(result_rows, dim=3)[:, :, :, :latent_height, :latent_width] return enc def tiled_decode( self, z: torch.Tensor, temb: Optional[torch.Tensor], return_dict: bool = True ) -> Union[DecoderOutput, torch.Tensor]: r""" Decode a batch of images using a tiled decoder. Args: z (`torch.Tensor`): Input batch of latent vectors. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~models.vae.DecoderOutput`] instead of a plain tuple. Returns: [`~models.vae.DecoderOutput`] or `tuple`: If return_dict is True, a [`~models.vae.DecoderOutput`] is returned, otherwise a plain `tuple` is returned. """ batch_size, num_channels, num_frames, height, width = z.shape sample_height = height * self.spatial_compression_ratio sample_width = width * self.spatial_compression_ratio tile_latent_min_height = self.tile_sample_min_height // self.spatial_compression_ratio tile_latent_min_width = self.tile_sample_min_width // self.spatial_compression_ratio tile_latent_stride_height = self.tile_sample_stride_height // self.spatial_compression_ratio tile_latent_stride_width = self.tile_sample_stride_width // self.spatial_compression_ratio blend_height = self.tile_sample_min_height - self.tile_sample_stride_height blend_width = self.tile_sample_min_width - self.tile_sample_stride_width # Split z into overlapping tiles and decode them separately. # The tiles have an overlap to avoid seams between tiles. 
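        # Each decoded tile is blended with its upper / left neighbour over `blend_height` /
        # `blend_width` pixels, then cropped to the tile stride before the rows are concatenated.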
rows = [] for i in range(0, height, tile_latent_stride_height): row = [] for j in range(0, width, tile_latent_stride_width): time = self.decoder(z[:, :, :, i : i + tile_latent_min_height, j : j + tile_latent_min_width], temb) row.append(time) rows.append(row) result_rows = [] for i, row in enumerate(rows): result_row = [] for j, tile in enumerate(row): # blend the above tile and the left tile # to the current tile and add the current tile to the result row if i > 0: tile = self.blend_v(rows[i - 1][j], tile, blend_height) if j > 0: tile = self.blend_h(row[j - 1], tile, blend_width) result_row.append(tile[:, :, :, : self.tile_sample_stride_height, : self.tile_sample_stride_width]) result_rows.append(torch.cat(result_row, dim=4)) dec = torch.cat(result_rows, dim=3)[:, :, :, :sample_height, :sample_width] if not return_dict: return (dec,) return DecoderOutput(sample=dec) def _temporal_tiled_encode(self, x: torch.Tensor) -> AutoencoderKLOutput: batch_size, num_channels, num_frames, height, width = x.shape latent_num_frames = (num_frames - 1) // self.temporal_compression_ratio + 1 tile_latent_min_num_frames = self.tile_sample_min_num_frames // self.temporal_compression_ratio tile_latent_stride_num_frames = self.tile_sample_stride_num_frames // self.temporal_compression_ratio blend_num_frames = tile_latent_min_num_frames - tile_latent_stride_num_frames row = [] for i in range(0, num_frames, self.tile_sample_stride_num_frames): tile = x[:, :, i : i + self.tile_sample_min_num_frames + 1, :, :] if self.use_tiling and (height > self.tile_sample_min_height or width > self.tile_sample_min_width): tile = self.tiled_encode(tile) else: tile = self.encoder(tile) if i > 0: tile = tile[:, :, 1:, :, :] row.append(tile) result_row = [] for i, tile in enumerate(row): if i > 0: tile = self.blend_t(row[i - 1], tile, blend_num_frames) result_row.append(tile[:, :, :tile_latent_stride_num_frames, :, :]) else: result_row.append(tile[:, :, : tile_latent_stride_num_frames + 1, :, :]) enc = torch.cat(result_row, dim=2)[:, :, :latent_num_frames] return enc def _temporal_tiled_decode( self, z: torch.Tensor, temb: Optional[torch.Tensor], return_dict: bool = True ) -> Union[DecoderOutput, torch.Tensor]: batch_size, num_channels, num_frames, height, width = z.shape num_sample_frames = (num_frames - 1) * self.temporal_compression_ratio + 1 tile_latent_min_height = self.tile_sample_min_height // self.spatial_compression_ratio tile_latent_min_width = self.tile_sample_min_width // self.spatial_compression_ratio tile_latent_min_num_frames = self.tile_sample_min_num_frames // self.temporal_compression_ratio tile_latent_stride_num_frames = self.tile_sample_stride_num_frames // self.temporal_compression_ratio blend_num_frames = self.tile_sample_min_num_frames - self.tile_sample_stride_num_frames row = [] for i in range(0, num_frames, tile_latent_stride_num_frames): tile = z[:, :, i : i + tile_latent_min_num_frames + 1, :, :] if self.use_tiling and (tile.shape[-1] > tile_latent_min_width or tile.shape[-2] > tile_latent_min_height): decoded = self.tiled_decode(tile, temb, return_dict=True).sample else: decoded = self.decoder(tile, temb) if i > 0: decoded = decoded[:, :, :-1, :, :] row.append(decoded) result_row = [] for i, tile in enumerate(row): if i > 0: tile = self.blend_t(row[i - 1], tile, blend_num_frames) tile = tile[:, :, : self.tile_sample_stride_num_frames, :, :] result_row.append(tile) else: result_row.append(tile[:, :, : self.tile_sample_stride_num_frames + 1, :, :]) dec = torch.cat(result_row, dim=2)[:, :, 
:num_sample_frames]

        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)

    def forward(
        self,
        sample: torch.Tensor,
        temb: Optional[torch.Tensor] = None,
        sample_posterior: bool = False,
        return_dict: bool = True,
        generator: Optional[torch.Generator] = None,
    ) -> Union[DecoderOutput, torch.Tensor]:
        x = sample
        posterior = self.encode(x).latent_dist
        if sample_posterior:
            z = posterior.sample(generator=generator)
        else:
            z = posterior.mode()
        dec = self.decode(z, temb)
        if not return_dict:
            return (dec.sample,)
        return dec
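A minimal smoke-test sketch of the class above. Nothing here comes from a pretrained checkpoint: the tiny channel counts and the 9-frame input are made up for illustration (real weights, e.g. the `vae` subfolder of an LTX-Video repository on the Hub, use the defaults documented above):

import torch

from diffusers import AutoencoderKLLTXVideo

# Deliberately tiny, randomly initialised configuration -- illustration only.
vae = AutoencoderKLLTXVideo(
    latent_channels=8,
    block_out_channels=(8, 8, 8, 8),
    decoder_block_out_channels=(8, 8, 8, 8),
    layers_per_block=(1, 1, 1, 1, 1),
    decoder_layers_per_block=(1, 1, 1, 1, 1),
)
vae.enable_slicing()  # decode one batch element at a time
vae.enable_tiling()   # spatial tiling only activates for inputs larger than the tile minima

# With the default compression ratios, 9 input frames map to (9 - 1) // 8 + 1 = 2 latent frames.
video = torch.randn(1, 3, 9, 64, 64)  # (batch, channels, frames, height, width)
posterior = vae.encode(video).latent_dist
latents = posterior.sample()
reconstruction = vae.decode(latents).sample  # back to (1, 3, 9, 64, 64)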
diffusers/src/diffusers/models/autoencoders/autoencoder_kl_ltx.py/0
{ "file_path": "diffusers/src/diffusers/models/autoencoders/autoencoder_kl_ltx.py", "repo_id": "diffusers", "token_count": 30359 }
161
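The `latents_mean` / `latents_std` buffers registered in the autoencoder above are what downstream pipelines use to standardize latents before the diffusion transformer. A hedged sketch of that convention follows; the helper name is illustrative and not defined in the file:

import torch

def normalize_latents(latents: torch.Tensor, vae) -> torch.Tensor:
    # Per-channel standardization followed by the configured scaling factor.
    mean = vae.latents_mean.view(1, -1, 1, 1, 1).to(latents)
    std = vae.latents_std.view(1, -1, 1, 1, 1).to(latents)
    return (latents - mean) * vae.config.scaling_factor / std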
from ...utils import is_flax_available, is_torch_available


if is_torch_available():
    from .controlnet import ControlNetModel, ControlNetOutput
    from .controlnet_flux import FluxControlNetModel, FluxControlNetOutput, FluxMultiControlNetModel
    from .controlnet_hunyuan import (
        HunyuanControlNetOutput,
        HunyuanDiT2DControlNetModel,
        HunyuanDiT2DMultiControlNetModel,
    )
    from .controlnet_qwenimage import QwenImageControlNetModel, QwenImageMultiControlNetModel
    from .controlnet_sana import SanaControlNetModel
    from .controlnet_sd3 import SD3ControlNetModel, SD3ControlNetOutput, SD3MultiControlNetModel
    from .controlnet_sparsectrl import (
        SparseControlNetConditioningEmbedding,
        SparseControlNetModel,
        SparseControlNetOutput,
    )
    from .controlnet_union import ControlNetUnionModel
    from .controlnet_xs import ControlNetXSAdapter, ControlNetXSOutput, UNetControlNetXSModel
    from .multicontrolnet import MultiControlNetModel
    from .multicontrolnet_union import MultiControlNetUnionModel

if is_flax_available():
    from .controlnet_flax import FlaxControlNetModel
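The guarded imports above mean the PyTorch ControlNet variants only resolve when torch is installed, and the Flax model only when flax is. A typical consumer simply imports from this namespace; the hub id below is only an example, not something this file prescribes:

from diffusers.models.controlnets import ControlNetModel, MultiControlNetModel

# Load one ControlNet and run two of them side by side through the multi-model wrapper.
controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
multi_controlnet = MultiControlNetModel([controlnet, controlnet])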
diffusers/src/diffusers/models/controlnets/__init__.py/0
{ "file_path": "diffusers/src/diffusers/models/controlnets/__init__.py", "repo_id": "diffusers", "token_count": 393 }
162
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # IMPORTANT: # ################################################################### # ----------------------------------------------------------------# # This file is deprecated and will be removed soon # # (as soon as PEFT will become a required dependency for LoRA) # # ----------------------------------------------------------------# ################################################################### from typing import Optional, Tuple, Union import torch import torch.nn.functional as F from torch import nn from ..utils import deprecate, logging from ..utils.import_utils import is_transformers_available if is_transformers_available(): from transformers import CLIPTextModel, CLIPTextModelWithProjection logger = logging.get_logger(__name__) # pylint: disable=invalid-name def text_encoder_attn_modules(text_encoder: nn.Module): attn_modules = [] if isinstance(text_encoder, (CLIPTextModel, CLIPTextModelWithProjection)): for i, layer in enumerate(text_encoder.text_model.encoder.layers): name = f"text_model.encoder.layers.{i}.self_attn" mod = layer.self_attn attn_modules.append((name, mod)) else: raise ValueError(f"do not know how to get attention modules for: {text_encoder.__class__.__name__}") return attn_modules def text_encoder_mlp_modules(text_encoder: nn.Module): mlp_modules = [] if isinstance(text_encoder, (CLIPTextModel, CLIPTextModelWithProjection)): for i, layer in enumerate(text_encoder.text_model.encoder.layers): mlp_mod = layer.mlp name = f"text_model.encoder.layers.{i}.mlp" mlp_modules.append((name, mlp_mod)) else: raise ValueError(f"do not know how to get mlp modules for: {text_encoder.__class__.__name__}") return mlp_modules def adjust_lora_scale_text_encoder(text_encoder, lora_scale: float = 1.0): for _, attn_module in text_encoder_attn_modules(text_encoder): if isinstance(attn_module.q_proj, PatchedLoraProjection): attn_module.q_proj.lora_scale = lora_scale attn_module.k_proj.lora_scale = lora_scale attn_module.v_proj.lora_scale = lora_scale attn_module.out_proj.lora_scale = lora_scale for _, mlp_module in text_encoder_mlp_modules(text_encoder): if isinstance(mlp_module.fc1, PatchedLoraProjection): mlp_module.fc1.lora_scale = lora_scale mlp_module.fc2.lora_scale = lora_scale class PatchedLoraProjection(torch.nn.Module): def __init__(self, regular_linear_layer, lora_scale=1, network_alpha=None, rank=4, dtype=None): deprecation_message = "Use of `PatchedLoraProjection` is deprecated. Please switch to PEFT backend by installing PEFT: `pip install peft`." 
deprecate("PatchedLoraProjection", "1.0.0", deprecation_message) super().__init__() from ..models.lora import LoRALinearLayer self.regular_linear_layer = regular_linear_layer device = self.regular_linear_layer.weight.device if dtype is None: dtype = self.regular_linear_layer.weight.dtype self.lora_linear_layer = LoRALinearLayer( self.regular_linear_layer.in_features, self.regular_linear_layer.out_features, network_alpha=network_alpha, device=device, dtype=dtype, rank=rank, ) self.lora_scale = lora_scale # overwrite PyTorch's `state_dict` to be sure that only the 'regular_linear_layer' weights are saved # when saving the whole text encoder model and when LoRA is unloaded or fused def state_dict(self, *args, destination=None, prefix="", keep_vars=False): if self.lora_linear_layer is None: return self.regular_linear_layer.state_dict( *args, destination=destination, prefix=prefix, keep_vars=keep_vars ) return super().state_dict(*args, destination=destination, prefix=prefix, keep_vars=keep_vars) def _fuse_lora(self, lora_scale=1.0, safe_fusing=False): if self.lora_linear_layer is None: return dtype, device = self.regular_linear_layer.weight.data.dtype, self.regular_linear_layer.weight.data.device w_orig = self.regular_linear_layer.weight.data.float() w_up = self.lora_linear_layer.up.weight.data.float() w_down = self.lora_linear_layer.down.weight.data.float() if self.lora_linear_layer.network_alpha is not None: w_up = w_up * self.lora_linear_layer.network_alpha / self.lora_linear_layer.rank fused_weight = w_orig + (lora_scale * torch.bmm(w_up[None, :], w_down[None, :])[0]) if safe_fusing and torch.isnan(fused_weight).any().item(): raise ValueError( "This LoRA weight seems to be broken. " f"Encountered NaN values when trying to fuse LoRA weights for {self}." "LoRA weights will not be fused." ) self.regular_linear_layer.weight.data = fused_weight.to(device=device, dtype=dtype) # we can drop the lora layer now self.lora_linear_layer = None # offload the up and down matrices to CPU to not blow the memory self.w_up = w_up.cpu() self.w_down = w_down.cpu() self.lora_scale = lora_scale def _unfuse_lora(self): if not (getattr(self, "w_up", None) is not None and getattr(self, "w_down", None) is not None): return fused_weight = self.regular_linear_layer.weight.data dtype, device = fused_weight.dtype, fused_weight.device w_up = self.w_up.to(device=device).float() w_down = self.w_down.to(device).float() unfused_weight = fused_weight.float() - (self.lora_scale * torch.bmm(w_up[None, :], w_down[None, :])[0]) self.regular_linear_layer.weight.data = unfused_weight.to(device=device, dtype=dtype) self.w_up = None self.w_down = None def forward(self, input): if self.lora_scale is None: self.lora_scale = 1.0 if self.lora_linear_layer is None: return self.regular_linear_layer(input) return self.regular_linear_layer(input) + (self.lora_scale * self.lora_linear_layer(input)) class LoRALinearLayer(nn.Module): r""" A linear layer that is used with LoRA. Parameters: in_features (`int`): Number of input features. out_features (`int`): Number of output features. rank (`int`, `optional`, defaults to 4): The rank of the LoRA layer. network_alpha (`float`, `optional`, defaults to `None`): The value of the network alpha used for stable learning and preventing underflow. This value has the same meaning as the `--network_alpha` option in the kohya-ss trainer script. 
See https://github.com/darkstorm2150/sd-scripts/blob/main/docs/train_network_README-en.md#execute-learning device (`torch.device`, `optional`, defaults to `None`): The device to use for the layer's weights. dtype (`torch.dtype`, `optional`, defaults to `None`): The dtype to use for the layer's weights. """ def __init__( self, in_features: int, out_features: int, rank: int = 4, network_alpha: Optional[float] = None, device: Optional[Union[torch.device, str]] = None, dtype: Optional[torch.dtype] = None, ): super().__init__() deprecation_message = "Use of `LoRALinearLayer` is deprecated. Please switch to PEFT backend by installing PEFT: `pip install peft`." deprecate("LoRALinearLayer", "1.0.0", deprecation_message) self.down = nn.Linear(in_features, rank, bias=False, device=device, dtype=dtype) self.up = nn.Linear(rank, out_features, bias=False, device=device, dtype=dtype) # This value has the same meaning as the `--network_alpha` option in the kohya-ss trainer script. # See https://github.com/darkstorm2150/sd-scripts/blob/main/docs/train_network_README-en.md#execute-learning self.network_alpha = network_alpha self.rank = rank self.out_features = out_features self.in_features = in_features nn.init.normal_(self.down.weight, std=1 / rank) nn.init.zeros_(self.up.weight) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: orig_dtype = hidden_states.dtype dtype = self.down.weight.dtype down_hidden_states = self.down(hidden_states.to(dtype)) up_hidden_states = self.up(down_hidden_states) if self.network_alpha is not None: up_hidden_states *= self.network_alpha / self.rank return up_hidden_states.to(orig_dtype) class LoRAConv2dLayer(nn.Module): r""" A convolutional layer that is used with LoRA. Parameters: in_features (`int`): Number of input features. out_features (`int`): Number of output features. rank (`int`, `optional`, defaults to 4): The rank of the LoRA layer. kernel_size (`int` or `tuple` of two `int`, `optional`, defaults to 1): The kernel size of the convolution. stride (`int` or `tuple` of two `int`, `optional`, defaults to 1): The stride of the convolution. padding (`int` or `tuple` of two `int` or `str`, `optional`, defaults to 0): The padding of the convolution. network_alpha (`float`, `optional`, defaults to `None`): The value of the network alpha used for stable learning and preventing underflow. This value has the same meaning as the `--network_alpha` option in the kohya-ss trainer script. See https://github.com/darkstorm2150/sd-scripts/blob/main/docs/train_network_README-en.md#execute-learning """ def __init__( self, in_features: int, out_features: int, rank: int = 4, kernel_size: Union[int, Tuple[int, int]] = (1, 1), stride: Union[int, Tuple[int, int]] = (1, 1), padding: Union[int, Tuple[int, int], str] = 0, network_alpha: Optional[float] = None, ): super().__init__() deprecation_message = "Use of `LoRAConv2dLayer` is deprecated. Please switch to PEFT backend by installing PEFT: `pip install peft`." deprecate("LoRAConv2dLayer", "1.0.0", deprecation_message) self.down = nn.Conv2d(in_features, rank, kernel_size=kernel_size, stride=stride, padding=padding, bias=False) # according to the official kohya_ss trainer kernel_size are always fixed for the up layer # # see: https://github.com/bmaltais/kohya_ss/blob/2accb1305979ba62f5077a23aabac23b4c37e935/networks/lora_diffusers.py#L129 self.up = nn.Conv2d(rank, out_features, kernel_size=(1, 1), stride=(1, 1), bias=False) # This value has the same meaning as the `--network_alpha` option in the kohya-ss trainer script. 
# See https://github.com/darkstorm2150/sd-scripts/blob/main/docs/train_network_README-en.md#execute-learning self.network_alpha = network_alpha self.rank = rank nn.init.normal_(self.down.weight, std=1 / rank) nn.init.zeros_(self.up.weight) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: orig_dtype = hidden_states.dtype dtype = self.down.weight.dtype down_hidden_states = self.down(hidden_states.to(dtype)) up_hidden_states = self.up(down_hidden_states) if self.network_alpha is not None: up_hidden_states *= self.network_alpha / self.rank return up_hidden_states.to(orig_dtype) class LoRACompatibleConv(nn.Conv2d): """ A convolutional layer that can be used with LoRA. """ def __init__(self, *args, lora_layer: Optional[LoRAConv2dLayer] = None, **kwargs): deprecation_message = "Use of `LoRACompatibleConv` is deprecated. Please switch to PEFT backend by installing PEFT: `pip install peft`." deprecate("LoRACompatibleConv", "1.0.0", deprecation_message) super().__init__(*args, **kwargs) self.lora_layer = lora_layer def set_lora_layer(self, lora_layer: Optional[LoRAConv2dLayer]): deprecation_message = "Use of `set_lora_layer()` is deprecated. Please switch to PEFT backend by installing PEFT: `pip install peft`." deprecate("set_lora_layer", "1.0.0", deprecation_message) self.lora_layer = lora_layer def _fuse_lora(self, lora_scale: float = 1.0, safe_fusing: bool = False): if self.lora_layer is None: return dtype, device = self.weight.data.dtype, self.weight.data.device w_orig = self.weight.data.float() w_up = self.lora_layer.up.weight.data.float() w_down = self.lora_layer.down.weight.data.float() if self.lora_layer.network_alpha is not None: w_up = w_up * self.lora_layer.network_alpha / self.lora_layer.rank fusion = torch.mm(w_up.flatten(start_dim=1), w_down.flatten(start_dim=1)) fusion = fusion.reshape((w_orig.shape)) fused_weight = w_orig + (lora_scale * fusion) if safe_fusing and torch.isnan(fused_weight).any().item(): raise ValueError( "This LoRA weight seems to be broken. " f"Encountered NaN values when trying to fuse LoRA weights for {self}." "LoRA weights will not be fused." 
) self.weight.data = fused_weight.to(device=device, dtype=dtype) # we can drop the lora layer now self.lora_layer = None # offload the up and down matrices to CPU to not blow the memory self.w_up = w_up.cpu() self.w_down = w_down.cpu() self._lora_scale = lora_scale def _unfuse_lora(self): if not (getattr(self, "w_up", None) is not None and getattr(self, "w_down", None) is not None): return fused_weight = self.weight.data dtype, device = fused_weight.data.dtype, fused_weight.data.device self.w_up = self.w_up.to(device=device).float() self.w_down = self.w_down.to(device).float() fusion = torch.mm(self.w_up.flatten(start_dim=1), self.w_down.flatten(start_dim=1)) fusion = fusion.reshape((fused_weight.shape)) unfused_weight = fused_weight.float() - (self._lora_scale * fusion) self.weight.data = unfused_weight.to(device=device, dtype=dtype) self.w_up = None self.w_down = None def forward(self, hidden_states: torch.Tensor, scale: float = 1.0) -> torch.Tensor: if self.padding_mode != "zeros": hidden_states = F.pad(hidden_states, self._reversed_padding_repeated_twice, mode=self.padding_mode) padding = (0, 0) else: padding = self.padding original_outputs = F.conv2d( hidden_states, self.weight, self.bias, self.stride, padding, self.dilation, self.groups ) if self.lora_layer is None: return original_outputs else: return original_outputs + (scale * self.lora_layer(hidden_states)) class LoRACompatibleLinear(nn.Linear): """ A Linear layer that can be used with LoRA. """ def __init__(self, *args, lora_layer: Optional[LoRALinearLayer] = None, **kwargs): deprecation_message = "Use of `LoRACompatibleLinear` is deprecated. Please switch to PEFT backend by installing PEFT: `pip install peft`." deprecate("LoRACompatibleLinear", "1.0.0", deprecation_message) super().__init__(*args, **kwargs) self.lora_layer = lora_layer def set_lora_layer(self, lora_layer: Optional[LoRALinearLayer]): deprecation_message = "Use of `set_lora_layer()` is deprecated. Please switch to PEFT backend by installing PEFT: `pip install peft`." deprecate("set_lora_layer", "1.0.0", deprecation_message) self.lora_layer = lora_layer def _fuse_lora(self, lora_scale: float = 1.0, safe_fusing: bool = False): if self.lora_layer is None: return dtype, device = self.weight.data.dtype, self.weight.data.device w_orig = self.weight.data.float() w_up = self.lora_layer.up.weight.data.float() w_down = self.lora_layer.down.weight.data.float() if self.lora_layer.network_alpha is not None: w_up = w_up * self.lora_layer.network_alpha / self.lora_layer.rank fused_weight = w_orig + (lora_scale * torch.bmm(w_up[None, :], w_down[None, :])[0]) if safe_fusing and torch.isnan(fused_weight).any().item(): raise ValueError( "This LoRA weight seems to be broken. " f"Encountered NaN values when trying to fuse LoRA weights for {self}." "LoRA weights will not be fused." 
) self.weight.data = fused_weight.to(device=device, dtype=dtype) # we can drop the lora layer now self.lora_layer = None # offload the up and down matrices to CPU to not blow the memory self.w_up = w_up.cpu() self.w_down = w_down.cpu() self._lora_scale = lora_scale def _unfuse_lora(self): if not (getattr(self, "w_up", None) is not None and getattr(self, "w_down", None) is not None): return fused_weight = self.weight.data dtype, device = fused_weight.dtype, fused_weight.device w_up = self.w_up.to(device=device).float() w_down = self.w_down.to(device).float() unfused_weight = fused_weight.float() - (self._lora_scale * torch.bmm(w_up[None, :], w_down[None, :])[0]) self.weight.data = unfused_weight.to(device=device, dtype=dtype) self.w_up = None self.w_down = None def forward(self, hidden_states: torch.Tensor, scale: float = 1.0) -> torch.Tensor: if self.lora_layer is None: out = super().forward(hidden_states) return out else: out = super().forward(hidden_states) + (scale * self.lora_layer(hidden_states)) return out
diffusers/src/diffusers/models/lora.py/0
{ "file_path": "diffusers/src/diffusers/models/lora.py", "repo_id": "diffusers", "token_count": 7982 }
163
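The deprecated LoRA layers in the file above all implement the same low-rank update, and fusion folds that update into the base weight. Below is a minimal sketch in plain PyTorch (not part of the file; names and sizes are illustrative) checking that the un-fused forward and the fused weight agree.

# Minimal sketch (plain PyTorch, illustrative names/sizes) of the math in
# LoRALinearLayer.forward and PatchedLoraProjection._fuse_lora: the un-fused
# path adds scale * (alpha / rank) * up(down(x)); fusion folds the same
# update directly into the base weight.
import torch

torch.manual_seed(0)
in_features, out_features, rank, alpha, scale = 16, 32, 4, 8.0, 1.0

base = torch.nn.Linear(in_features, out_features, bias=False)
down = torch.nn.Linear(in_features, rank, bias=False)
up = torch.nn.Linear(rank, out_features, bias=False)
torch.nn.init.normal_(down.weight, std=1 / rank)   # matches LoRALinearLayer init
torch.nn.init.normal_(up.weight)                    # non-zero here so the update is visible

x = torch.randn(2, in_features)

# un-fused forward, as in LoRALinearLayer.forward
lora_update = up(down(x)) * (alpha / rank)
unfused = base(x) + scale * lora_update

# fused weight, as in _fuse_lora: w_orig + scale * (alpha / rank) * (w_up @ w_down)
w_up = up.weight * (alpha / rank)
fused_weight = base.weight + scale * (w_up @ down.weight)
fused = torch.nn.functional.linear(x, fused_weight)

print(torch.allclose(unfused, fused, atol=1e-5))  # True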
# Copyright 2025 HunyuanDiT Authors, Qixun Wang and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Dict, Optional, Union import torch from torch import nn from ...configuration_utils import ConfigMixin, register_to_config from ...utils import logging from ...utils.torch_utils import maybe_allow_in_graph from ..attention import FeedForward from ..attention_processor import Attention, AttentionProcessor, FusedHunyuanAttnProcessor2_0, HunyuanAttnProcessor2_0 from ..embeddings import ( HunyuanCombinedTimestepTextSizeStyleEmbedding, PatchEmbed, PixArtAlphaTextProjection, ) from ..modeling_outputs import Transformer2DModelOutput from ..modeling_utils import ModelMixin from ..normalization import AdaLayerNormContinuous, FP32LayerNorm logger = logging.get_logger(__name__) # pylint: disable=invalid-name class AdaLayerNormShift(nn.Module): r""" Norm layer modified to incorporate timestep embeddings. Parameters: embedding_dim (`int`): The size of each embedding vector. num_embeddings (`int`): The size of the embeddings dictionary. """ def __init__(self, embedding_dim: int, elementwise_affine=True, eps=1e-6): super().__init__() self.silu = nn.SiLU() self.linear = nn.Linear(embedding_dim, embedding_dim) self.norm = FP32LayerNorm(embedding_dim, elementwise_affine=elementwise_affine, eps=eps) def forward(self, x: torch.Tensor, emb: torch.Tensor) -> torch.Tensor: shift = self.linear(self.silu(emb.to(torch.float32)).to(emb.dtype)) x = self.norm(x) + shift.unsqueeze(dim=1) return x @maybe_allow_in_graph class HunyuanDiTBlock(nn.Module): r""" Transformer block used in Hunyuan-DiT model (https://github.com/Tencent/HunyuanDiT). Allow skip connection and QKNorm Parameters: dim (`int`): The number of channels in the input and output. num_attention_heads (`int`): The number of headsto use for multi-head attention. cross_attention_dim (`int`,*optional*): The size of the encoder_hidden_states vector for cross attention. dropout(`float`, *optional*, defaults to 0.0): The dropout probability to use. activation_fn (`str`,*optional*, defaults to `"geglu"`): Activation function to be used in feed-forward. . norm_elementwise_affine (`bool`, *optional*, defaults to `True`): Whether to use learnable elementwise affine parameters for normalization. norm_eps (`float`, *optional*, defaults to 1e-6): A small constant added to the denominator in normalization layers to prevent division by zero. final_dropout (`bool` *optional*, defaults to False): Whether to apply a final dropout after the last feed-forward layer. ff_inner_dim (`int`, *optional*): The size of the hidden layer in the feed-forward block. Defaults to `None`. ff_bias (`bool`, *optional*, defaults to `True`): Whether to use bias in the feed-forward block. skip (`bool`, *optional*, defaults to `False`): Whether to use skip connection. Defaults to `False` for down-blocks and mid-blocks. qk_norm (`bool`, *optional*, defaults to `True`): Whether to use normalization in QK calculation. Defaults to `True`. 
""" def __init__( self, dim: int, num_attention_heads: int, cross_attention_dim: int = 1024, dropout=0.0, activation_fn: str = "geglu", norm_elementwise_affine: bool = True, norm_eps: float = 1e-6, final_dropout: bool = False, ff_inner_dim: Optional[int] = None, ff_bias: bool = True, skip: bool = False, qk_norm: bool = True, ): super().__init__() # Define 3 blocks. Each block has its own normalization layer. # NOTE: when new version comes, check norm2 and norm 3 # 1. Self-Attn self.norm1 = AdaLayerNormShift(dim, elementwise_affine=norm_elementwise_affine, eps=norm_eps) self.attn1 = Attention( query_dim=dim, cross_attention_dim=None, dim_head=dim // num_attention_heads, heads=num_attention_heads, qk_norm="layer_norm" if qk_norm else None, eps=1e-6, bias=True, processor=HunyuanAttnProcessor2_0(), ) # 2. Cross-Attn self.norm2 = FP32LayerNorm(dim, norm_eps, norm_elementwise_affine) self.attn2 = Attention( query_dim=dim, cross_attention_dim=cross_attention_dim, dim_head=dim // num_attention_heads, heads=num_attention_heads, qk_norm="layer_norm" if qk_norm else None, eps=1e-6, bias=True, processor=HunyuanAttnProcessor2_0(), ) # 3. Feed-forward self.norm3 = FP32LayerNorm(dim, norm_eps, norm_elementwise_affine) self.ff = FeedForward( dim, dropout=dropout, ### 0.0 activation_fn=activation_fn, ### approx GeLU final_dropout=final_dropout, ### 0.0 inner_dim=ff_inner_dim, ### int(dim * mlp_ratio) bias=ff_bias, ) # 4. Skip Connection if skip: self.skip_norm = FP32LayerNorm(2 * dim, norm_eps, elementwise_affine=True) self.skip_linear = nn.Linear(2 * dim, dim) else: self.skip_linear = None # let chunk size default to None self._chunk_size = None self._chunk_dim = 0 # Copied from diffusers.models.attention.BasicTransformerBlock.set_chunk_feed_forward def set_chunk_feed_forward(self, chunk_size: Optional[int], dim: int = 0): # Sets chunk feed-forward self._chunk_size = chunk_size self._chunk_dim = dim def forward( self, hidden_states: torch.Tensor, encoder_hidden_states: Optional[torch.Tensor] = None, temb: Optional[torch.Tensor] = None, image_rotary_emb=None, skip=None, ) -> torch.Tensor: # Notice that normalization is always applied before the real computation in the following blocks. # 0. Long Skip Connection if self.skip_linear is not None: cat = torch.cat([hidden_states, skip], dim=-1) cat = self.skip_norm(cat) hidden_states = self.skip_linear(cat) # 1. Self-Attention norm_hidden_states = self.norm1(hidden_states, temb) ### checked: self.norm1 is correct attn_output = self.attn1( norm_hidden_states, image_rotary_emb=image_rotary_emb, ) hidden_states = hidden_states + attn_output # 2. Cross-Attention hidden_states = hidden_states + self.attn2( self.norm2(hidden_states), encoder_hidden_states=encoder_hidden_states, image_rotary_emb=image_rotary_emb, ) # FFN Layer ### TODO: switch norm2 and norm3 in the state dict mlp_inputs = self.norm3(hidden_states) hidden_states = hidden_states + self.ff(mlp_inputs) return hidden_states class HunyuanDiT2DModel(ModelMixin, ConfigMixin): """ HunYuanDiT: Diffusion model with a Transformer backbone. Inherit ModelMixin and ConfigMixin to be compatible with the sampler StableDiffusionPipeline of diffusers. Parameters: num_attention_heads (`int`, *optional*, defaults to 16): The number of heads to use for multi-head attention. attention_head_dim (`int`, *optional*, defaults to 88): The number of channels in each head. in_channels (`int`, *optional*): The number of channels in the input and output (specify if the input is **continuous**). 
patch_size (`int`, *optional*): The size of the patch to use for the input. activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to use in feed-forward. sample_size (`int`, *optional*): The width of the latent images. This is fixed during training since it is used to learn a number of position embeddings. dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use. cross_attention_dim (`int`, *optional*): The number of dimension in the clip text embedding. hidden_size (`int`, *optional*): The size of hidden layer in the conditioning embedding layers. num_layers (`int`, *optional*, defaults to 1): The number of layers of Transformer blocks to use. mlp_ratio (`float`, *optional*, defaults to 4.0): The ratio of the hidden layer size to the input size. learn_sigma (`bool`, *optional*, defaults to `True`): Whether to predict variance. cross_attention_dim_t5 (`int`, *optional*): The number dimensions in t5 text embedding. pooled_projection_dim (`int`, *optional*): The size of the pooled projection. text_len (`int`, *optional*): The length of the clip text embedding. text_len_t5 (`int`, *optional*): The length of the T5 text embedding. use_style_cond_and_image_meta_size (`bool`, *optional*): Whether or not to use style condition and image meta size. True for version <=1.1, False for version >= 1.2 """ _skip_layerwise_casting_patterns = ["pos_embed", "norm", "pooler"] _supports_group_offloading = False @register_to_config def __init__( self, num_attention_heads: int = 16, attention_head_dim: int = 88, in_channels: Optional[int] = None, patch_size: Optional[int] = None, activation_fn: str = "gelu-approximate", sample_size=32, hidden_size=1152, num_layers: int = 28, mlp_ratio: float = 4.0, learn_sigma: bool = True, cross_attention_dim: int = 1024, norm_type: str = "layer_norm", cross_attention_dim_t5: int = 2048, pooled_projection_dim: int = 1024, text_len: int = 77, text_len_t5: int = 256, use_style_cond_and_image_meta_size: bool = True, ): super().__init__() self.out_channels = in_channels * 2 if learn_sigma else in_channels self.num_heads = num_attention_heads self.inner_dim = num_attention_heads * attention_head_dim self.text_embedder = PixArtAlphaTextProjection( in_features=cross_attention_dim_t5, hidden_size=cross_attention_dim_t5 * 4, out_features=cross_attention_dim, act_fn="silu_fp32", ) self.text_embedding_padding = nn.Parameter(torch.randn(text_len + text_len_t5, cross_attention_dim)) self.pos_embed = PatchEmbed( height=sample_size, width=sample_size, in_channels=in_channels, embed_dim=hidden_size, patch_size=patch_size, pos_embed_type=None, ) self.time_extra_emb = HunyuanCombinedTimestepTextSizeStyleEmbedding( hidden_size, pooled_projection_dim=pooled_projection_dim, seq_len=text_len_t5, cross_attention_dim=cross_attention_dim_t5, use_style_cond_and_image_meta_size=use_style_cond_and_image_meta_size, ) # HunyuanDiT Blocks self.blocks = nn.ModuleList( [ HunyuanDiTBlock( dim=self.inner_dim, num_attention_heads=self.config.num_attention_heads, activation_fn=activation_fn, ff_inner_dim=int(self.inner_dim * mlp_ratio), cross_attention_dim=cross_attention_dim, qk_norm=True, # See https://huggingface.co/papers/2302.05442 for details. 
skip=layer > num_layers // 2, ) for layer in range(num_layers) ] ) self.norm_out = AdaLayerNormContinuous(self.inner_dim, self.inner_dim, elementwise_affine=False, eps=1e-6) self.proj_out = nn.Linear(self.inner_dim, patch_size * patch_size * self.out_channels, bias=True) # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.fuse_qkv_projections with FusedAttnProcessor2_0->FusedHunyuanAttnProcessor2_0 def fuse_qkv_projections(self): """ Enables fused QKV projections. For self-attention modules, all projection matrices (i.e., query, key, value) are fused. For cross-attention modules, key and value projection matrices are fused. <Tip warning={true}> This API is 🧪 experimental. </Tip> """ self.original_attn_processors = None for _, attn_processor in self.attn_processors.items(): if "Added" in str(attn_processor.__class__.__name__): raise ValueError("`fuse_qkv_projections()` is not supported for models having added KV projections.") self.original_attn_processors = self.attn_processors for module in self.modules(): if isinstance(module, Attention): module.fuse_projections(fuse=True) self.set_attn_processor(FusedHunyuanAttnProcessor2_0()) # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.unfuse_qkv_projections def unfuse_qkv_projections(self): """Disables the fused QKV projection if enabled. <Tip warning={true}> This API is 🧪 experimental. </Tip> """ if self.original_attn_processors is not None: self.set_attn_processor(self.original_attn_processors) @property # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.attn_processors def attn_processors(self) -> Dict[str, AttentionProcessor]: r""" Returns: `dict` of attention processors: A dictionary containing all attention processors used in the model with indexed by its weight name. """ # set recursively processors = {} def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]): if hasattr(module, "get_processor"): processors[f"{name}.processor"] = module.get_processor() for sub_name, child in module.named_children(): fn_recursive_add_processors(f"{name}.{sub_name}", child, processors) return processors for name, module in self.named_children(): fn_recursive_add_processors(name, module, processors) return processors # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attn_processor def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]): r""" Sets the attention processor to use to compute attention. Parameters: processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`): The instantiated processor class or a dictionary of processor classes that will be set as the processor for **all** `Attention` layers. If `processor` is a dict, the key needs to define the path to the corresponding cross attention processor. This is strongly recommended when setting trainable attention processors. """ count = len(self.attn_processors.keys()) if isinstance(processor, dict) and len(processor) != count: raise ValueError( f"A dict of processors was passed, but the number of processors {len(processor)} does not match the" f" number of attention layers: {count}. Please make sure to pass {count} processor classes." 
) def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor): if hasattr(module, "set_processor"): if not isinstance(processor, dict): module.set_processor(processor) else: module.set_processor(processor.pop(f"{name}.processor")) for sub_name, child in module.named_children(): fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor) for name, module in self.named_children(): fn_recursive_attn_processor(name, module, processor) def set_default_attn_processor(self): """ Disables custom attention processors and sets the default attention implementation. """ self.set_attn_processor(HunyuanAttnProcessor2_0()) def forward( self, hidden_states, timestep, encoder_hidden_states=None, text_embedding_mask=None, encoder_hidden_states_t5=None, text_embedding_mask_t5=None, image_meta_size=None, style=None, image_rotary_emb=None, controlnet_block_samples=None, return_dict=True, ): """ The [`HunyuanDiT2DModel`] forward method. Args: hidden_states (`torch.Tensor` of shape `(batch size, dim, height, width)`): The input tensor. timestep ( `torch.LongTensor`, *optional*): Used to indicate denoising step. encoder_hidden_states ( `torch.Tensor` of shape `(batch size, sequence len, embed dims)`, *optional*): Conditional embeddings for cross attention layer. This is the output of `BertModel`. text_embedding_mask: torch.Tensor An attention mask of shape `(batch, key_tokens)` is applied to `encoder_hidden_states`. This is the output of `BertModel`. encoder_hidden_states_t5 ( `torch.Tensor` of shape `(batch size, sequence len, embed dims)`, *optional*): Conditional embeddings for cross attention layer. This is the output of T5 Text Encoder. text_embedding_mask_t5: torch.Tensor An attention mask of shape `(batch, key_tokens)` is applied to `encoder_hidden_states`. This is the output of T5 Text Encoder. image_meta_size (torch.Tensor): Conditional embedding indicate the image sizes style: torch.Tensor: Conditional embedding indicate the style image_rotary_emb (`torch.Tensor`): The image rotary embeddings to apply on query and key tensors during attention calculation. return_dict: bool Whether to return a dictionary. 
""" height, width = hidden_states.shape[-2:] hidden_states = self.pos_embed(hidden_states) temb = self.time_extra_emb( timestep, encoder_hidden_states_t5, image_meta_size, style, hidden_dtype=timestep.dtype ) # [B, D] # text projection batch_size, sequence_length, _ = encoder_hidden_states_t5.shape encoder_hidden_states_t5 = self.text_embedder( encoder_hidden_states_t5.view(-1, encoder_hidden_states_t5.shape[-1]) ) encoder_hidden_states_t5 = encoder_hidden_states_t5.view(batch_size, sequence_length, -1) encoder_hidden_states = torch.cat([encoder_hidden_states, encoder_hidden_states_t5], dim=1) text_embedding_mask = torch.cat([text_embedding_mask, text_embedding_mask_t5], dim=-1) text_embedding_mask = text_embedding_mask.unsqueeze(2).bool() encoder_hidden_states = torch.where(text_embedding_mask, encoder_hidden_states, self.text_embedding_padding) skips = [] for layer, block in enumerate(self.blocks): if layer > self.config.num_layers // 2: if controlnet_block_samples is not None: skip = skips.pop() + controlnet_block_samples.pop() else: skip = skips.pop() hidden_states = block( hidden_states, temb=temb, encoder_hidden_states=encoder_hidden_states, image_rotary_emb=image_rotary_emb, skip=skip, ) # (N, L, D) else: hidden_states = block( hidden_states, temb=temb, encoder_hidden_states=encoder_hidden_states, image_rotary_emb=image_rotary_emb, ) # (N, L, D) if layer < (self.config.num_layers // 2 - 1): skips.append(hidden_states) if controlnet_block_samples is not None and len(controlnet_block_samples) != 0: raise ValueError("The number of controls is not equal to the number of skip connections.") # final layer hidden_states = self.norm_out(hidden_states, temb.to(torch.float32)) hidden_states = self.proj_out(hidden_states) # (N, L, patch_size ** 2 * out_channels) # unpatchify: (N, out_channels, H, W) patch_size = self.pos_embed.patch_size height = height // patch_size width = width // patch_size hidden_states = hidden_states.reshape( shape=(hidden_states.shape[0], height, width, patch_size, patch_size, self.out_channels) ) hidden_states = torch.einsum("nhwpqc->nchpwq", hidden_states) output = hidden_states.reshape( shape=(hidden_states.shape[0], self.out_channels, height * patch_size, width * patch_size) ) if not return_dict: return (output,) return Transformer2DModelOutput(sample=output) # Copied from diffusers.models.unets.unet_3d_condition.UNet3DConditionModel.enable_forward_chunking def enable_forward_chunking(self, chunk_size: Optional[int] = None, dim: int = 0) -> None: """ Sets the attention processor to use [feed forward chunking](https://huggingface.co/blog/reformer#2-chunked-feed-forward-layers). Parameters: chunk_size (`int`, *optional*): The chunk size of the feed-forward layers. If not specified, will run feed-forward layer individually over each tensor of dim=`dim`. dim (`int`, *optional*, defaults to `0`): The dimension over which the feed-forward computation should be chunked. Choose between dim=0 (batch) or dim=1 (sequence length). 
""" if dim not in [0, 1]: raise ValueError(f"Make sure to set `dim` to either 0 or 1, not {dim}") # By default chunk size is 1 chunk_size = chunk_size or 1 def fn_recursive_feed_forward(module: torch.nn.Module, chunk_size: int, dim: int): if hasattr(module, "set_chunk_feed_forward"): module.set_chunk_feed_forward(chunk_size=chunk_size, dim=dim) for child in module.children(): fn_recursive_feed_forward(child, chunk_size, dim) for module in self.children(): fn_recursive_feed_forward(module, chunk_size, dim) # Copied from diffusers.models.unets.unet_3d_condition.UNet3DConditionModel.disable_forward_chunking def disable_forward_chunking(self): def fn_recursive_feed_forward(module: torch.nn.Module, chunk_size: int, dim: int): if hasattr(module, "set_chunk_feed_forward"): module.set_chunk_feed_forward(chunk_size=chunk_size, dim=dim) for child in module.children(): fn_recursive_feed_forward(child, chunk_size, dim) for module in self.children(): fn_recursive_feed_forward(module, None, 0)
diffusers/src/diffusers/models/transformers/hunyuan_transformer_2d.py/0
{ "file_path": "diffusers/src/diffusers/models/transformers/hunyuan_transformer_2d.py", "repo_id": "diffusers", "token_count": 10633 }
164
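HunyuanDiT2DModel wires its blocks as a U shape: blocks in the first half push their hidden states onto a stack, and blocks constructed with skip=True pop them back as long skip connections. The standalone sketch below (arbitrary num_layers, no real tensors) makes the producer/consumer pairing used in forward explicit.

# Standalone sketch of the long-skip bookkeeping in HunyuanDiT2DModel.forward.
# num_layers is arbitrary here; no real blocks or tensors are involved.
num_layers = 8
half = num_layers // 2

stack, producers, consumers = [], [], []
for layer in range(num_layers):
    if layer > half:              # these blocks are constructed with skip=True
        consumers.append((layer, stack.pop()))
    if layer < half - 1:          # early blocks store their hidden states
        stack.append(layer)
        producers.append(layer)

print("skip producers:", producers)   # [0, 1, 2]
print("skip consumers:", consumers)   # [(5, 2), (6, 1), (7, 0)] – last-in, first-out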
# Copyright 2025 Black Forest Labs, The HuggingFace Team and The InstantX Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect from typing import Any, Dict, List, Optional, Tuple, Union import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from ...configuration_utils import ConfigMixin, register_to_config from ...loaders import FluxTransformer2DLoadersMixin, FromOriginalModelMixin, PeftAdapterMixin from ...utils import USE_PEFT_BACKEND, deprecate, logging, scale_lora_layers, unscale_lora_layers from ...utils.import_utils import is_torch_npu_available from ...utils.torch_utils import maybe_allow_in_graph from ..attention import AttentionMixin, AttentionModuleMixin, FeedForward from ..attention_dispatch import dispatch_attention_fn from ..cache_utils import CacheMixin from ..embeddings import ( CombinedTimestepGuidanceTextProjEmbeddings, CombinedTimestepTextProjEmbeddings, apply_rotary_emb, get_1d_rotary_pos_embed, ) from ..modeling_outputs import Transformer2DModelOutput from ..modeling_utils import ModelMixin from ..normalization import AdaLayerNormContinuous, AdaLayerNormZero, AdaLayerNormZeroSingle logger = logging.get_logger(__name__) # pylint: disable=invalid-name def _get_projections(attn: "FluxAttention", hidden_states, encoder_hidden_states=None): query = attn.to_q(hidden_states) key = attn.to_k(hidden_states) value = attn.to_v(hidden_states) encoder_query = encoder_key = encoder_value = None if encoder_hidden_states is not None and attn.added_kv_proj_dim is not None: encoder_query = attn.add_q_proj(encoder_hidden_states) encoder_key = attn.add_k_proj(encoder_hidden_states) encoder_value = attn.add_v_proj(encoder_hidden_states) return query, key, value, encoder_query, encoder_key, encoder_value def _get_fused_projections(attn: "FluxAttention", hidden_states, encoder_hidden_states=None): query, key, value = attn.to_qkv(hidden_states).chunk(3, dim=-1) encoder_query = encoder_key = encoder_value = (None,) if encoder_hidden_states is not None and hasattr(attn, "to_added_qkv"): encoder_query, encoder_key, encoder_value = attn.to_added_qkv(encoder_hidden_states).chunk(3, dim=-1) return query, key, value, encoder_query, encoder_key, encoder_value def _get_qkv_projections(attn: "FluxAttention", hidden_states, encoder_hidden_states=None): if attn.fused_projections: return _get_fused_projections(attn, hidden_states, encoder_hidden_states) return _get_projections(attn, hidden_states, encoder_hidden_states) class FluxAttnProcessor: _attention_backend = None def __init__(self): if not hasattr(F, "scaled_dot_product_attention"): raise ImportError(f"{self.__class__.__name__} requires PyTorch 2.0. 
Please upgrade your pytorch version.") def __call__( self, attn: "FluxAttention", hidden_states: torch.Tensor, encoder_hidden_states: torch.Tensor = None, attention_mask: Optional[torch.Tensor] = None, image_rotary_emb: Optional[torch.Tensor] = None, ) -> torch.Tensor: query, key, value, encoder_query, encoder_key, encoder_value = _get_qkv_projections( attn, hidden_states, encoder_hidden_states ) query = query.unflatten(-1, (attn.heads, -1)) key = key.unflatten(-1, (attn.heads, -1)) value = value.unflatten(-1, (attn.heads, -1)) query = attn.norm_q(query) key = attn.norm_k(key) if attn.added_kv_proj_dim is not None: encoder_query = encoder_query.unflatten(-1, (attn.heads, -1)) encoder_key = encoder_key.unflatten(-1, (attn.heads, -1)) encoder_value = encoder_value.unflatten(-1, (attn.heads, -1)) encoder_query = attn.norm_added_q(encoder_query) encoder_key = attn.norm_added_k(encoder_key) query = torch.cat([encoder_query, query], dim=1) key = torch.cat([encoder_key, key], dim=1) value = torch.cat([encoder_value, value], dim=1) if image_rotary_emb is not None: query = apply_rotary_emb(query, image_rotary_emb, sequence_dim=1) key = apply_rotary_emb(key, image_rotary_emb, sequence_dim=1) hidden_states = dispatch_attention_fn( query, key, value, attn_mask=attention_mask, backend=self._attention_backend ) hidden_states = hidden_states.flatten(2, 3) hidden_states = hidden_states.to(query.dtype) if encoder_hidden_states is not None: encoder_hidden_states, hidden_states = hidden_states.split_with_sizes( [encoder_hidden_states.shape[1], hidden_states.shape[1] - encoder_hidden_states.shape[1]], dim=1 ) hidden_states = attn.to_out[0](hidden_states) hidden_states = attn.to_out[1](hidden_states) encoder_hidden_states = attn.to_add_out(encoder_hidden_states) return hidden_states, encoder_hidden_states else: return hidden_states class FluxIPAdapterAttnProcessor(torch.nn.Module): """Flux Attention processor for IP-Adapter.""" _attention_backend = None def __init__( self, hidden_size: int, cross_attention_dim: int, num_tokens=(4,), scale=1.0, device=None, dtype=None ): super().__init__() if not hasattr(F, "scaled_dot_product_attention"): raise ImportError( f"{self.__class__.__name__} requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0." 
) self.hidden_size = hidden_size self.cross_attention_dim = cross_attention_dim if not isinstance(num_tokens, (tuple, list)): num_tokens = [num_tokens] if not isinstance(scale, list): scale = [scale] * len(num_tokens) if len(scale) != len(num_tokens): raise ValueError("`scale` should be a list of integers with the same length as `num_tokens`.") self.scale = scale self.to_k_ip = nn.ModuleList( [ nn.Linear(cross_attention_dim, hidden_size, bias=True, device=device, dtype=dtype) for _ in range(len(num_tokens)) ] ) self.to_v_ip = nn.ModuleList( [ nn.Linear(cross_attention_dim, hidden_size, bias=True, device=device, dtype=dtype) for _ in range(len(num_tokens)) ] ) def __call__( self, attn: "FluxAttention", hidden_states: torch.Tensor, encoder_hidden_states: torch.Tensor = None, attention_mask: Optional[torch.Tensor] = None, image_rotary_emb: Optional[torch.Tensor] = None, ip_hidden_states: Optional[List[torch.Tensor]] = None, ip_adapter_masks: Optional[torch.Tensor] = None, ) -> torch.Tensor: batch_size = hidden_states.shape[0] query, key, value, encoder_query, encoder_key, encoder_value = _get_qkv_projections( attn, hidden_states, encoder_hidden_states ) query = query.unflatten(-1, (attn.heads, -1)) key = key.unflatten(-1, (attn.heads, -1)) value = value.unflatten(-1, (attn.heads, -1)) query = attn.norm_q(query) key = attn.norm_k(key) ip_query = query if encoder_hidden_states is not None: encoder_query = encoder_query.unflatten(-1, (attn.heads, -1)) encoder_key = encoder_key.unflatten(-1, (attn.heads, -1)) encoder_value = encoder_value.unflatten(-1, (attn.heads, -1)) encoder_query = attn.norm_added_q(encoder_query) encoder_key = attn.norm_added_k(encoder_key) query = torch.cat([encoder_query, query], dim=1) key = torch.cat([encoder_key, key], dim=1) value = torch.cat([encoder_value, value], dim=1) if image_rotary_emb is not None: query = apply_rotary_emb(query, image_rotary_emb, sequence_dim=1) key = apply_rotary_emb(key, image_rotary_emb, sequence_dim=1) hidden_states = dispatch_attention_fn( query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False, backend=self._attention_backend, ) hidden_states = hidden_states.flatten(2, 3) hidden_states = hidden_states.to(query.dtype) if encoder_hidden_states is not None: encoder_hidden_states, hidden_states = hidden_states.split_with_sizes( [encoder_hidden_states.shape[1], hidden_states.shape[1] - encoder_hidden_states.shape[1]], dim=1 ) hidden_states = attn.to_out[0](hidden_states) hidden_states = attn.to_out[1](hidden_states) encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # IP-adapter ip_attn_output = torch.zeros_like(hidden_states) for current_ip_hidden_states, scale, to_k_ip, to_v_ip in zip( ip_hidden_states, self.scale, self.to_k_ip, self.to_v_ip ): ip_key = to_k_ip(current_ip_hidden_states) ip_value = to_v_ip(current_ip_hidden_states) ip_key = ip_key.view(batch_size, -1, attn.heads, attn.head_dim) ip_value = ip_value.view(batch_size, -1, attn.heads, attn.head_dim) current_ip_hidden_states = dispatch_attention_fn( ip_query, ip_key, ip_value, attn_mask=None, dropout_p=0.0, is_causal=False, backend=self._attention_backend, ) current_ip_hidden_states = current_ip_hidden_states.reshape(batch_size, -1, attn.heads * attn.head_dim) current_ip_hidden_states = current_ip_hidden_states.to(ip_query.dtype) ip_attn_output += scale * current_ip_hidden_states return hidden_states, encoder_hidden_states, ip_attn_output else: return hidden_states class FluxAttention(torch.nn.Module, AttentionModuleMixin): _default_processor_cls = 
FluxAttnProcessor _available_processors = [ FluxAttnProcessor, FluxIPAdapterAttnProcessor, ] def __init__( self, query_dim: int, heads: int = 8, dim_head: int = 64, dropout: float = 0.0, bias: bool = False, added_kv_proj_dim: Optional[int] = None, added_proj_bias: Optional[bool] = True, out_bias: bool = True, eps: float = 1e-5, out_dim: int = None, context_pre_only: Optional[bool] = None, pre_only: bool = False, elementwise_affine: bool = True, processor=None, ): super().__init__() self.head_dim = dim_head self.inner_dim = out_dim if out_dim is not None else dim_head * heads self.query_dim = query_dim self.use_bias = bias self.dropout = dropout self.out_dim = out_dim if out_dim is not None else query_dim self.context_pre_only = context_pre_only self.pre_only = pre_only self.heads = out_dim // dim_head if out_dim is not None else heads self.added_kv_proj_dim = added_kv_proj_dim self.added_proj_bias = added_proj_bias self.norm_q = torch.nn.RMSNorm(dim_head, eps=eps, elementwise_affine=elementwise_affine) self.norm_k = torch.nn.RMSNorm(dim_head, eps=eps, elementwise_affine=elementwise_affine) self.to_q = torch.nn.Linear(query_dim, self.inner_dim, bias=bias) self.to_k = torch.nn.Linear(query_dim, self.inner_dim, bias=bias) self.to_v = torch.nn.Linear(query_dim, self.inner_dim, bias=bias) if not self.pre_only: self.to_out = torch.nn.ModuleList([]) self.to_out.append(torch.nn.Linear(self.inner_dim, self.out_dim, bias=out_bias)) self.to_out.append(torch.nn.Dropout(dropout)) if added_kv_proj_dim is not None: self.norm_added_q = torch.nn.RMSNorm(dim_head, eps=eps) self.norm_added_k = torch.nn.RMSNorm(dim_head, eps=eps) self.add_q_proj = torch.nn.Linear(added_kv_proj_dim, self.inner_dim, bias=added_proj_bias) self.add_k_proj = torch.nn.Linear(added_kv_proj_dim, self.inner_dim, bias=added_proj_bias) self.add_v_proj = torch.nn.Linear(added_kv_proj_dim, self.inner_dim, bias=added_proj_bias) self.to_add_out = torch.nn.Linear(self.inner_dim, query_dim, bias=out_bias) if processor is None: processor = self._default_processor_cls() self.set_processor(processor) def forward( self, hidden_states: torch.Tensor, encoder_hidden_states: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, image_rotary_emb: Optional[torch.Tensor] = None, **kwargs, ) -> torch.Tensor: attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) quiet_attn_parameters = {"ip_adapter_masks", "ip_hidden_states"} unused_kwargs = [k for k, _ in kwargs.items() if k not in attn_parameters and k not in quiet_attn_parameters] if len(unused_kwargs) > 0: logger.warning( f"joint_attention_kwargs {unused_kwargs} are not expected by {self.processor.__class__.__name__} and will be ignored." ) kwargs = {k: w for k, w in kwargs.items() if k in attn_parameters} return self.processor(self, hidden_states, encoder_hidden_states, attention_mask, image_rotary_emb, **kwargs) @maybe_allow_in_graph class FluxSingleTransformerBlock(nn.Module): def __init__(self, dim: int, num_attention_heads: int, attention_head_dim: int, mlp_ratio: float = 4.0): super().__init__() self.mlp_hidden_dim = int(dim * mlp_ratio) self.norm = AdaLayerNormZeroSingle(dim) self.proj_mlp = nn.Linear(dim, self.mlp_hidden_dim) self.act_mlp = nn.GELU(approximate="tanh") self.proj_out = nn.Linear(dim + self.mlp_hidden_dim, dim) if is_torch_npu_available(): from ..attention_processor import FluxAttnProcessor2_0_NPU deprecation_message = ( "Defaulting to FluxAttnProcessor2_0_NPU for NPU devices will be removed. 
Attention processors " "should be set explicitly using the `set_attn_processor` method." ) deprecate("npu_processor", "0.34.0", deprecation_message) processor = FluxAttnProcessor2_0_NPU() else: processor = FluxAttnProcessor() self.attn = FluxAttention( query_dim=dim, dim_head=attention_head_dim, heads=num_attention_heads, out_dim=dim, bias=True, processor=processor, eps=1e-6, pre_only=True, ) def forward( self, hidden_states: torch.Tensor, encoder_hidden_states: torch.Tensor, temb: torch.Tensor, image_rotary_emb: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, joint_attention_kwargs: Optional[Dict[str, Any]] = None, ) -> Tuple[torch.Tensor, torch.Tensor]: text_seq_len = encoder_hidden_states.shape[1] hidden_states = torch.cat([encoder_hidden_states, hidden_states], dim=1) residual = hidden_states norm_hidden_states, gate = self.norm(hidden_states, emb=temb) mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) joint_attention_kwargs = joint_attention_kwargs or {} attn_output = self.attn( hidden_states=norm_hidden_states, image_rotary_emb=image_rotary_emb, **joint_attention_kwargs, ) hidden_states = torch.cat([attn_output, mlp_hidden_states], dim=2) gate = gate.unsqueeze(1) hidden_states = gate * self.proj_out(hidden_states) hidden_states = residual + hidden_states if hidden_states.dtype == torch.float16: hidden_states = hidden_states.clip(-65504, 65504) encoder_hidden_states, hidden_states = hidden_states[:, :text_seq_len], hidden_states[:, text_seq_len:] return encoder_hidden_states, hidden_states @maybe_allow_in_graph class FluxTransformerBlock(nn.Module): def __init__( self, dim: int, num_attention_heads: int, attention_head_dim: int, qk_norm: str = "rms_norm", eps: float = 1e-6 ): super().__init__() self.norm1 = AdaLayerNormZero(dim) self.norm1_context = AdaLayerNormZero(dim) self.attn = FluxAttention( query_dim=dim, added_kv_proj_dim=dim, dim_head=attention_head_dim, heads=num_attention_heads, out_dim=dim, context_pre_only=False, bias=True, processor=FluxAttnProcessor(), eps=eps, ) self.norm2 = nn.LayerNorm(dim, elementwise_affine=False, eps=1e-6) self.ff = FeedForward(dim=dim, dim_out=dim, activation_fn="gelu-approximate") self.norm2_context = nn.LayerNorm(dim, elementwise_affine=False, eps=1e-6) self.ff_context = FeedForward(dim=dim, dim_out=dim, activation_fn="gelu-approximate") def forward( self, hidden_states: torch.Tensor, encoder_hidden_states: torch.Tensor, temb: torch.Tensor, image_rotary_emb: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, joint_attention_kwargs: Optional[Dict[str, Any]] = None, ) -> Tuple[torch.Tensor, torch.Tensor]: norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( encoder_hidden_states, emb=temb ) joint_attention_kwargs = joint_attention_kwargs or {} # Attention. attention_outputs = self.attn( hidden_states=norm_hidden_states, encoder_hidden_states=norm_encoder_hidden_states, image_rotary_emb=image_rotary_emb, **joint_attention_kwargs, ) if len(attention_outputs) == 2: attn_output, context_attn_output = attention_outputs elif len(attention_outputs) == 3: attn_output, context_attn_output, ip_attn_output = attention_outputs # Process attention outputs for the `hidden_states`. 
attn_output = gate_msa.unsqueeze(1) * attn_output hidden_states = hidden_states + attn_output norm_hidden_states = self.norm2(hidden_states) norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None] ff_output = self.ff(norm_hidden_states) ff_output = gate_mlp.unsqueeze(1) * ff_output hidden_states = hidden_states + ff_output if len(attention_outputs) == 3: hidden_states = hidden_states + ip_attn_output # Process attention outputs for the `encoder_hidden_states`. context_attn_output = c_gate_msa.unsqueeze(1) * context_attn_output encoder_hidden_states = encoder_hidden_states + context_attn_output norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) norm_encoder_hidden_states = norm_encoder_hidden_states * (1 + c_scale_mlp[:, None]) + c_shift_mlp[:, None] context_ff_output = self.ff_context(norm_encoder_hidden_states) encoder_hidden_states = encoder_hidden_states + c_gate_mlp.unsqueeze(1) * context_ff_output if encoder_hidden_states.dtype == torch.float16: encoder_hidden_states = encoder_hidden_states.clip(-65504, 65504) return encoder_hidden_states, hidden_states class FluxPosEmbed(nn.Module): # modified from https://github.com/black-forest-labs/flux/blob/c00d7c60b085fce8058b9df845e036090873f2ce/src/flux/modules/layers.py#L11 def __init__(self, theta: int, axes_dim: List[int]): super().__init__() self.theta = theta self.axes_dim = axes_dim def forward(self, ids: torch.Tensor) -> torch.Tensor: n_axes = ids.shape[-1] cos_out = [] sin_out = [] pos = ids.float() is_mps = ids.device.type == "mps" is_npu = ids.device.type == "npu" freqs_dtype = torch.float32 if (is_mps or is_npu) else torch.float64 for i in range(n_axes): cos, sin = get_1d_rotary_pos_embed( self.axes_dim[i], pos[:, i], theta=self.theta, repeat_interleave_real=True, use_real=True, freqs_dtype=freqs_dtype, ) cos_out.append(cos) sin_out.append(sin) freqs_cos = torch.cat(cos_out, dim=-1).to(ids.device) freqs_sin = torch.cat(sin_out, dim=-1).to(ids.device) return freqs_cos, freqs_sin class FluxTransformer2DModel( ModelMixin, ConfigMixin, PeftAdapterMixin, FromOriginalModelMixin, FluxTransformer2DLoadersMixin, CacheMixin, AttentionMixin, ): """ The Transformer model introduced in Flux. Reference: https://blackforestlabs.ai/announcing-black-forest-labs/ Args: patch_size (`int`, defaults to `1`): Patch size to turn the input data into small patches. in_channels (`int`, defaults to `64`): The number of channels in the input. out_channels (`int`, *optional*, defaults to `None`): The number of channels in the output. If not specified, it defaults to `in_channels`. num_layers (`int`, defaults to `19`): The number of layers of dual stream DiT blocks to use. num_single_layers (`int`, defaults to `38`): The number of layers of single stream DiT blocks to use. attention_head_dim (`int`, defaults to `128`): The number of dimensions to use for each attention head. num_attention_heads (`int`, defaults to `24`): The number of attention heads to use. joint_attention_dim (`int`, defaults to `4096`): The number of dimensions to use for the joint attention (embedding/channel dimension of `encoder_hidden_states`). pooled_projection_dim (`int`, defaults to `768`): The number of dimensions to use for the pooled projection. guidance_embeds (`bool`, defaults to `False`): Whether to use guidance embeddings for guidance-distilled variant of the model. axes_dims_rope (`Tuple[int]`, defaults to `(16, 56, 56)`): The dimensions to use for the rotary positional embeddings. 
""" _supports_gradient_checkpointing = True _no_split_modules = ["FluxTransformerBlock", "FluxSingleTransformerBlock"] _skip_layerwise_casting_patterns = ["pos_embed", "norm"] _repeated_blocks = ["FluxTransformerBlock", "FluxSingleTransformerBlock"] @register_to_config def __init__( self, patch_size: int = 1, in_channels: int = 64, out_channels: Optional[int] = None, num_layers: int = 19, num_single_layers: int = 38, attention_head_dim: int = 128, num_attention_heads: int = 24, joint_attention_dim: int = 4096, pooled_projection_dim: int = 768, guidance_embeds: bool = False, axes_dims_rope: Tuple[int, int, int] = (16, 56, 56), ): super().__init__() self.out_channels = out_channels or in_channels self.inner_dim = num_attention_heads * attention_head_dim self.pos_embed = FluxPosEmbed(theta=10000, axes_dim=axes_dims_rope) text_time_guidance_cls = ( CombinedTimestepGuidanceTextProjEmbeddings if guidance_embeds else CombinedTimestepTextProjEmbeddings ) self.time_text_embed = text_time_guidance_cls( embedding_dim=self.inner_dim, pooled_projection_dim=pooled_projection_dim ) self.context_embedder = nn.Linear(joint_attention_dim, self.inner_dim) self.x_embedder = nn.Linear(in_channels, self.inner_dim) self.transformer_blocks = nn.ModuleList( [ FluxTransformerBlock( dim=self.inner_dim, num_attention_heads=num_attention_heads, attention_head_dim=attention_head_dim, ) for _ in range(num_layers) ] ) self.single_transformer_blocks = nn.ModuleList( [ FluxSingleTransformerBlock( dim=self.inner_dim, num_attention_heads=num_attention_heads, attention_head_dim=attention_head_dim, ) for _ in range(num_single_layers) ] ) self.norm_out = AdaLayerNormContinuous(self.inner_dim, self.inner_dim, elementwise_affine=False, eps=1e-6) self.proj_out = nn.Linear(self.inner_dim, patch_size * patch_size * self.out_channels, bias=True) self.gradient_checkpointing = False def forward( self, hidden_states: torch.Tensor, encoder_hidden_states: torch.Tensor = None, pooled_projections: torch.Tensor = None, timestep: torch.LongTensor = None, img_ids: torch.Tensor = None, txt_ids: torch.Tensor = None, guidance: torch.Tensor = None, joint_attention_kwargs: Optional[Dict[str, Any]] = None, controlnet_block_samples=None, controlnet_single_block_samples=None, return_dict: bool = True, controlnet_blocks_repeat: bool = False, ) -> Union[torch.Tensor, Transformer2DModelOutput]: """ The [`FluxTransformer2DModel`] forward method. Args: hidden_states (`torch.Tensor` of shape `(batch_size, image_sequence_length, in_channels)`): Input `hidden_states`. encoder_hidden_states (`torch.Tensor` of shape `(batch_size, text_sequence_length, joint_attention_dim)`): Conditional embeddings (embeddings computed from the input conditions such as prompts) to use. pooled_projections (`torch.Tensor` of shape `(batch_size, projection_dim)`): Embeddings projected from the embeddings of input conditions. timestep ( `torch.LongTensor`): Used to indicate denoising step. block_controlnet_hidden_states: (`list` of `torch.Tensor`): A list of tensors that if specified are added to the residuals of transformer blocks. joint_attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under `self.processor` in [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~models.transformer_2d.Transformer2DModelOutput`] instead of a plain tuple. 
Returns: If `return_dict` is True, an [`~models.transformer_2d.Transformer2DModelOutput`] is returned, otherwise a `tuple` where the first element is the sample tensor. """ if joint_attention_kwargs is not None: joint_attention_kwargs = joint_attention_kwargs.copy() lora_scale = joint_attention_kwargs.pop("scale", 1.0) else: lora_scale = 1.0 if USE_PEFT_BACKEND: # weight the lora layers by setting `lora_scale` for each PEFT layer scale_lora_layers(self, lora_scale) else: if joint_attention_kwargs is not None and joint_attention_kwargs.get("scale", None) is not None: logger.warning( "Passing `scale` via `joint_attention_kwargs` when not using the PEFT backend is ineffective." ) hidden_states = self.x_embedder(hidden_states) timestep = timestep.to(hidden_states.dtype) * 1000 if guidance is not None: guidance = guidance.to(hidden_states.dtype) * 1000 temb = ( self.time_text_embed(timestep, pooled_projections) if guidance is None else self.time_text_embed(timestep, guidance, pooled_projections) ) encoder_hidden_states = self.context_embedder(encoder_hidden_states) if txt_ids.ndim == 3: logger.warning( "Passing `txt_ids` 3d torch.Tensor is deprecated." "Please remove the batch dimension and pass it as a 2d torch Tensor" ) txt_ids = txt_ids[0] if img_ids.ndim == 3: logger.warning( "Passing `img_ids` 3d torch.Tensor is deprecated." "Please remove the batch dimension and pass it as a 2d torch Tensor" ) img_ids = img_ids[0] ids = torch.cat((txt_ids, img_ids), dim=0) image_rotary_emb = self.pos_embed(ids) if joint_attention_kwargs is not None and "ip_adapter_image_embeds" in joint_attention_kwargs: ip_adapter_image_embeds = joint_attention_kwargs.pop("ip_adapter_image_embeds") ip_hidden_states = self.encoder_hid_proj(ip_adapter_image_embeds) joint_attention_kwargs.update({"ip_hidden_states": ip_hidden_states}) for index_block, block in enumerate(self.transformer_blocks): if torch.is_grad_enabled() and self.gradient_checkpointing: encoder_hidden_states, hidden_states = self._gradient_checkpointing_func( block, hidden_states, encoder_hidden_states, temb, image_rotary_emb, joint_attention_kwargs, ) else: encoder_hidden_states, hidden_states = block( hidden_states=hidden_states, encoder_hidden_states=encoder_hidden_states, temb=temb, image_rotary_emb=image_rotary_emb, joint_attention_kwargs=joint_attention_kwargs, ) # controlnet residual if controlnet_block_samples is not None: interval_control = len(self.transformer_blocks) / len(controlnet_block_samples) interval_control = int(np.ceil(interval_control)) # For Xlabs ControlNet. 
if controlnet_blocks_repeat: hidden_states = ( hidden_states + controlnet_block_samples[index_block % len(controlnet_block_samples)] ) else: hidden_states = hidden_states + controlnet_block_samples[index_block // interval_control] for index_block, block in enumerate(self.single_transformer_blocks): if torch.is_grad_enabled() and self.gradient_checkpointing: encoder_hidden_states, hidden_states = self._gradient_checkpointing_func( block, hidden_states, encoder_hidden_states, temb, image_rotary_emb, joint_attention_kwargs, ) else: encoder_hidden_states, hidden_states = block( hidden_states=hidden_states, encoder_hidden_states=encoder_hidden_states, temb=temb, image_rotary_emb=image_rotary_emb, joint_attention_kwargs=joint_attention_kwargs, ) # controlnet residual if controlnet_single_block_samples is not None: interval_control = len(self.single_transformer_blocks) / len(controlnet_single_block_samples) interval_control = int(np.ceil(interval_control)) hidden_states = hidden_states + controlnet_single_block_samples[index_block // interval_control] hidden_states = self.norm_out(hidden_states, temb) output = self.proj_out(hidden_states) if USE_PEFT_BACKEND: # remove `lora_scale` from each PEFT layer unscale_lora_layers(self, lora_scale) if not return_dict: return (output,) return Transformer2DModelOutput(sample=output)
diffusers/src/diffusers/models/transformers/transformer_flux.py/0
{ "file_path": "diffusers/src/diffusers/models/transformers/transformer_flux.py", "repo_id": "diffusers", "token_count": 15574 }
165
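As a usage illustration only, a randomly initialized FluxTransformer2DModel with a deliberately tiny configuration (the values below are assumptions for the sketch, not the released Flux checkpoints' config) can be pushed through one forward pass to see the expected input and output shapes.

# Hedged usage sketch: tiny, randomly initialized FluxTransformer2DModel
# (illustrative config values) run through a single forward pass.
import torch
from diffusers import FluxTransformer2DModel

model = FluxTransformer2DModel(
    patch_size=1,
    in_channels=16,
    num_layers=1,
    num_single_layers=1,
    attention_head_dim=16,
    num_attention_heads=2,
    joint_attention_dim=32,
    pooled_projection_dim=32,
    axes_dims_rope=(4, 6, 6),   # must sum to attention_head_dim
)

batch, img_len, txt_len = 1, 64, 8
hidden_states = torch.randn(batch, img_len, 16)            # packed latent tokens
encoder_hidden_states = torch.randn(batch, txt_len, 32)     # text-encoder states
pooled_projections = torch.randn(batch, 32)
timestep = torch.tensor([1.0])
img_ids = torch.zeros(img_len, 3)                           # 2d position ids (no batch dim)
txt_ids = torch.zeros(txt_len, 3)

with torch.no_grad():
    sample = model(
        hidden_states=hidden_states,
        encoder_hidden_states=encoder_hidden_states,
        pooled_projections=pooled_projections,
        timestep=timestep,
        img_ids=img_ids,
        txt_ids=txt_ids,
        return_dict=False,
    )[0]

print(sample.shape)  # torch.Size([1, 64, 16]) – patch_size**2 * out_channels per token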
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import math from typing import Optional, Tuple, Union import torch import torch.nn.functional as F from torch import nn from ..activations import get_activation from ..resnet import Downsample1D, ResidualTemporalBlock1D, Upsample1D, rearrange_dims class DownResnetBlock1D(nn.Module): def __init__( self, in_channels: int, out_channels: Optional[int] = None, num_layers: int = 1, conv_shortcut: bool = False, temb_channels: int = 32, groups: int = 32, groups_out: Optional[int] = None, non_linearity: Optional[str] = None, time_embedding_norm: str = "default", output_scale_factor: float = 1.0, add_downsample: bool = True, ): super().__init__() self.in_channels = in_channels out_channels = in_channels if out_channels is None else out_channels self.out_channels = out_channels self.use_conv_shortcut = conv_shortcut self.time_embedding_norm = time_embedding_norm self.add_downsample = add_downsample self.output_scale_factor = output_scale_factor if groups_out is None: groups_out = groups # there will always be at least one resnet resnets = [ResidualTemporalBlock1D(in_channels, out_channels, embed_dim=temb_channels)] for _ in range(num_layers): resnets.append(ResidualTemporalBlock1D(out_channels, out_channels, embed_dim=temb_channels)) self.resnets = nn.ModuleList(resnets) if non_linearity is None: self.nonlinearity = None else: self.nonlinearity = get_activation(non_linearity) self.downsample = None if add_downsample: self.downsample = Downsample1D(out_channels, use_conv=True, padding=1) def forward(self, hidden_states: torch.Tensor, temb: Optional[torch.Tensor] = None) -> torch.Tensor: output_states = () hidden_states = self.resnets[0](hidden_states, temb) for resnet in self.resnets[1:]: hidden_states = resnet(hidden_states, temb) output_states += (hidden_states,) if self.nonlinearity is not None: hidden_states = self.nonlinearity(hidden_states) if self.downsample is not None: hidden_states = self.downsample(hidden_states) return hidden_states, output_states class UpResnetBlock1D(nn.Module): def __init__( self, in_channels: int, out_channels: Optional[int] = None, num_layers: int = 1, temb_channels: int = 32, groups: int = 32, groups_out: Optional[int] = None, non_linearity: Optional[str] = None, time_embedding_norm: str = "default", output_scale_factor: float = 1.0, add_upsample: bool = True, ): super().__init__() self.in_channels = in_channels out_channels = in_channels if out_channels is None else out_channels self.out_channels = out_channels self.time_embedding_norm = time_embedding_norm self.add_upsample = add_upsample self.output_scale_factor = output_scale_factor if groups_out is None: groups_out = groups # there will always be at least one resnet resnets = [ResidualTemporalBlock1D(2 * in_channels, out_channels, embed_dim=temb_channels)] for _ in range(num_layers): resnets.append(ResidualTemporalBlock1D(out_channels, out_channels, embed_dim=temb_channels)) self.resnets = nn.ModuleList(resnets) if 
non_linearity is None: self.nonlinearity = None else: self.nonlinearity = get_activation(non_linearity) self.upsample = None if add_upsample: self.upsample = Upsample1D(out_channels, use_conv_transpose=True) def forward( self, hidden_states: torch.Tensor, res_hidden_states_tuple: Optional[Tuple[torch.Tensor, ...]] = None, temb: Optional[torch.Tensor] = None, ) -> torch.Tensor: if res_hidden_states_tuple is not None: res_hidden_states = res_hidden_states_tuple[-1] hidden_states = torch.cat((hidden_states, res_hidden_states), dim=1) hidden_states = self.resnets[0](hidden_states, temb) for resnet in self.resnets[1:]: hidden_states = resnet(hidden_states, temb) if self.nonlinearity is not None: hidden_states = self.nonlinearity(hidden_states) if self.upsample is not None: hidden_states = self.upsample(hidden_states) return hidden_states class ValueFunctionMidBlock1D(nn.Module): def __init__(self, in_channels: int, out_channels: int, embed_dim: int): super().__init__() self.in_channels = in_channels self.out_channels = out_channels self.embed_dim = embed_dim self.res1 = ResidualTemporalBlock1D(in_channels, in_channels // 2, embed_dim=embed_dim) self.down1 = Downsample1D(out_channels // 2, use_conv=True) self.res2 = ResidualTemporalBlock1D(in_channels // 2, in_channels // 4, embed_dim=embed_dim) self.down2 = Downsample1D(out_channels // 4, use_conv=True) def forward(self, x: torch.Tensor, temb: Optional[torch.Tensor] = None) -> torch.Tensor: x = self.res1(x, temb) x = self.down1(x) x = self.res2(x, temb) x = self.down2(x) return x class MidResTemporalBlock1D(nn.Module): def __init__( self, in_channels: int, out_channels: int, embed_dim: int, num_layers: int = 1, add_downsample: bool = False, add_upsample: bool = False, non_linearity: Optional[str] = None, ): super().__init__() self.in_channels = in_channels self.out_channels = out_channels self.add_downsample = add_downsample # there will always be at least one resnet resnets = [ResidualTemporalBlock1D(in_channels, out_channels, embed_dim=embed_dim)] for _ in range(num_layers): resnets.append(ResidualTemporalBlock1D(out_channels, out_channels, embed_dim=embed_dim)) self.resnets = nn.ModuleList(resnets) if non_linearity is None: self.nonlinearity = None else: self.nonlinearity = get_activation(non_linearity) self.upsample = None if add_upsample: self.upsample = Upsample1D(out_channels, use_conv=True) self.downsample = None if add_downsample: self.downsample = Downsample1D(out_channels, use_conv=True) if self.upsample and self.downsample: raise ValueError("Block cannot downsample and upsample") def forward(self, hidden_states: torch.Tensor, temb: torch.Tensor) -> torch.Tensor: hidden_states = self.resnets[0](hidden_states, temb) for resnet in self.resnets[1:]: hidden_states = resnet(hidden_states, temb) if self.upsample: hidden_states = self.upsample(hidden_states) if self.downsample: hidden_states = self.downsample(hidden_states) return hidden_states class OutConv1DBlock(nn.Module): def __init__(self, num_groups_out: int, out_channels: int, embed_dim: int, act_fn: str): super().__init__() self.final_conv1d_1 = nn.Conv1d(embed_dim, embed_dim, 5, padding=2) self.final_conv1d_gn = nn.GroupNorm(num_groups_out, embed_dim) self.final_conv1d_act = get_activation(act_fn) self.final_conv1d_2 = nn.Conv1d(embed_dim, out_channels, 1) def forward(self, hidden_states: torch.Tensor, temb: Optional[torch.Tensor] = None) -> torch.Tensor: hidden_states = self.final_conv1d_1(hidden_states) hidden_states = rearrange_dims(hidden_states) hidden_states = 
self.final_conv1d_gn(hidden_states) hidden_states = rearrange_dims(hidden_states) hidden_states = self.final_conv1d_act(hidden_states) hidden_states = self.final_conv1d_2(hidden_states) return hidden_states class OutValueFunctionBlock(nn.Module): def __init__(self, fc_dim: int, embed_dim: int, act_fn: str = "mish"): super().__init__() self.final_block = nn.ModuleList( [ nn.Linear(fc_dim + embed_dim, fc_dim // 2), get_activation(act_fn), nn.Linear(fc_dim // 2, 1), ] ) def forward(self, hidden_states: torch.Tensor, temb: torch.Tensor) -> torch.Tensor: hidden_states = hidden_states.view(hidden_states.shape[0], -1) hidden_states = torch.cat((hidden_states, temb), dim=-1) for layer in self.final_block: hidden_states = layer(hidden_states) return hidden_states _kernels = { "linear": [1 / 8, 3 / 8, 3 / 8, 1 / 8], "cubic": [-0.01171875, -0.03515625, 0.11328125, 0.43359375, 0.43359375, 0.11328125, -0.03515625, -0.01171875], "lanczos3": [ 0.003689131001010537, 0.015056144446134567, -0.03399861603975296, -0.066637322306633, 0.13550527393817902, 0.44638532400131226, 0.44638532400131226, 0.13550527393817902, -0.066637322306633, -0.03399861603975296, 0.015056144446134567, 0.003689131001010537, ], } class Downsample1d(nn.Module): def __init__(self, kernel: str = "linear", pad_mode: str = "reflect"): super().__init__() self.pad_mode = pad_mode kernel_1d = torch.tensor(_kernels[kernel]) self.pad = kernel_1d.shape[0] // 2 - 1 self.register_buffer("kernel", kernel_1d) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = F.pad(hidden_states, (self.pad,) * 2, self.pad_mode) weight = hidden_states.new_zeros([hidden_states.shape[1], hidden_states.shape[1], self.kernel.shape[0]]) indices = torch.arange(hidden_states.shape[1], device=hidden_states.device) kernel = self.kernel.to(weight)[None, :].expand(hidden_states.shape[1], -1) weight[indices, indices] = kernel return F.conv1d(hidden_states, weight, stride=2) class Upsample1d(nn.Module): def __init__(self, kernel: str = "linear", pad_mode: str = "reflect"): super().__init__() self.pad_mode = pad_mode kernel_1d = torch.tensor(_kernels[kernel]) * 2 self.pad = kernel_1d.shape[0] // 2 - 1 self.register_buffer("kernel", kernel_1d) def forward(self, hidden_states: torch.Tensor, temb: Optional[torch.Tensor] = None) -> torch.Tensor: hidden_states = F.pad(hidden_states, ((self.pad + 1) // 2,) * 2, self.pad_mode) weight = hidden_states.new_zeros([hidden_states.shape[1], hidden_states.shape[1], self.kernel.shape[0]]) indices = torch.arange(hidden_states.shape[1], device=hidden_states.device) kernel = self.kernel.to(weight)[None, :].expand(hidden_states.shape[1], -1) weight[indices, indices] = kernel return F.conv_transpose1d(hidden_states, weight, stride=2, padding=self.pad * 2 + 1) class SelfAttention1d(nn.Module): def __init__(self, in_channels: int, n_head: int = 1, dropout_rate: float = 0.0): super().__init__() self.channels = in_channels self.group_norm = nn.GroupNorm(1, num_channels=in_channels) self.num_heads = n_head self.query = nn.Linear(self.channels, self.channels) self.key = nn.Linear(self.channels, self.channels) self.value = nn.Linear(self.channels, self.channels) self.proj_attn = nn.Linear(self.channels, self.channels, bias=True) self.dropout = nn.Dropout(dropout_rate, inplace=True) def transpose_for_scores(self, projection: torch.Tensor) -> torch.Tensor: new_projection_shape = projection.size()[:-1] + (self.num_heads, -1) # move heads to 2nd position (B, T, H * D) -> (B, T, H, D) -> (B, H, T, D) new_projection = 
projection.view(new_projection_shape).permute(0, 2, 1, 3) return new_projection def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: residual = hidden_states batch, channel_dim, seq = hidden_states.shape hidden_states = self.group_norm(hidden_states) hidden_states = hidden_states.transpose(1, 2) query_proj = self.query(hidden_states) key_proj = self.key(hidden_states) value_proj = self.value(hidden_states) query_states = self.transpose_for_scores(query_proj) key_states = self.transpose_for_scores(key_proj) value_states = self.transpose_for_scores(value_proj) scale = 1 / math.sqrt(math.sqrt(key_states.shape[-1])) attention_scores = torch.matmul(query_states * scale, key_states.transpose(-1, -2) * scale) attention_probs = torch.softmax(attention_scores, dim=-1) # compute attention output hidden_states = torch.matmul(attention_probs, value_states) hidden_states = hidden_states.permute(0, 2, 1, 3).contiguous() new_hidden_states_shape = hidden_states.size()[:-2] + (self.channels,) hidden_states = hidden_states.view(new_hidden_states_shape) # compute next hidden_states hidden_states = self.proj_attn(hidden_states) hidden_states = hidden_states.transpose(1, 2) hidden_states = self.dropout(hidden_states) output = hidden_states + residual return output class ResConvBlock(nn.Module): def __init__(self, in_channels: int, mid_channels: int, out_channels: int, is_last: bool = False): super().__init__() self.is_last = is_last self.has_conv_skip = in_channels != out_channels if self.has_conv_skip: self.conv_skip = nn.Conv1d(in_channels, out_channels, 1, bias=False) self.conv_1 = nn.Conv1d(in_channels, mid_channels, 5, padding=2) self.group_norm_1 = nn.GroupNorm(1, mid_channels) self.gelu_1 = nn.GELU() self.conv_2 = nn.Conv1d(mid_channels, out_channels, 5, padding=2) if not self.is_last: self.group_norm_2 = nn.GroupNorm(1, out_channels) self.gelu_2 = nn.GELU() def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: residual = self.conv_skip(hidden_states) if self.has_conv_skip else hidden_states hidden_states = self.conv_1(hidden_states) hidden_states = self.group_norm_1(hidden_states) hidden_states = self.gelu_1(hidden_states) hidden_states = self.conv_2(hidden_states) if not self.is_last: hidden_states = self.group_norm_2(hidden_states) hidden_states = self.gelu_2(hidden_states) output = hidden_states + residual return output class UNetMidBlock1D(nn.Module): def __init__(self, mid_channels: int, in_channels: int, out_channels: Optional[int] = None): super().__init__() out_channels = in_channels if out_channels is None else out_channels # there is always at least one resnet self.down = Downsample1d("cubic") resnets = [ ResConvBlock(in_channels, mid_channels, mid_channels), ResConvBlock(mid_channels, mid_channels, mid_channels), ResConvBlock(mid_channels, mid_channels, mid_channels), ResConvBlock(mid_channels, mid_channels, mid_channels), ResConvBlock(mid_channels, mid_channels, mid_channels), ResConvBlock(mid_channels, mid_channels, out_channels), ] attentions = [ SelfAttention1d(mid_channels, mid_channels // 32), SelfAttention1d(mid_channels, mid_channels // 32), SelfAttention1d(mid_channels, mid_channels // 32), SelfAttention1d(mid_channels, mid_channels // 32), SelfAttention1d(mid_channels, mid_channels // 32), SelfAttention1d(out_channels, out_channels // 32), ] self.up = Upsample1d(kernel="cubic") self.attentions = nn.ModuleList(attentions) self.resnets = nn.ModuleList(resnets) def forward(self, hidden_states: torch.Tensor, temb: Optional[torch.Tensor] = None) -> torch.Tensor: 
hidden_states = self.down(hidden_states) for attn, resnet in zip(self.attentions, self.resnets): hidden_states = resnet(hidden_states) hidden_states = attn(hidden_states) hidden_states = self.up(hidden_states) return hidden_states class AttnDownBlock1D(nn.Module): def __init__(self, out_channels: int, in_channels: int, mid_channels: Optional[int] = None): super().__init__() mid_channels = out_channels if mid_channels is None else mid_channels self.down = Downsample1d("cubic") resnets = [ ResConvBlock(in_channels, mid_channels, mid_channels), ResConvBlock(mid_channels, mid_channels, mid_channels), ResConvBlock(mid_channels, mid_channels, out_channels), ] attentions = [ SelfAttention1d(mid_channels, mid_channels // 32), SelfAttention1d(mid_channels, mid_channels // 32), SelfAttention1d(out_channels, out_channels // 32), ] self.attentions = nn.ModuleList(attentions) self.resnets = nn.ModuleList(resnets) def forward(self, hidden_states: torch.Tensor, temb: Optional[torch.Tensor] = None) -> torch.Tensor: hidden_states = self.down(hidden_states) for resnet, attn in zip(self.resnets, self.attentions): hidden_states = resnet(hidden_states) hidden_states = attn(hidden_states) return hidden_states, (hidden_states,) class DownBlock1D(nn.Module): def __init__(self, out_channels: int, in_channels: int, mid_channels: Optional[int] = None): super().__init__() mid_channels = out_channels if mid_channels is None else mid_channels self.down = Downsample1d("cubic") resnets = [ ResConvBlock(in_channels, mid_channels, mid_channels), ResConvBlock(mid_channels, mid_channels, mid_channels), ResConvBlock(mid_channels, mid_channels, out_channels), ] self.resnets = nn.ModuleList(resnets) def forward(self, hidden_states: torch.Tensor, temb: Optional[torch.Tensor] = None) -> torch.Tensor: hidden_states = self.down(hidden_states) for resnet in self.resnets: hidden_states = resnet(hidden_states) return hidden_states, (hidden_states,) class DownBlock1DNoSkip(nn.Module): def __init__(self, out_channels: int, in_channels: int, mid_channels: Optional[int] = None): super().__init__() mid_channels = out_channels if mid_channels is None else mid_channels resnets = [ ResConvBlock(in_channels, mid_channels, mid_channels), ResConvBlock(mid_channels, mid_channels, mid_channels), ResConvBlock(mid_channels, mid_channels, out_channels), ] self.resnets = nn.ModuleList(resnets) def forward(self, hidden_states: torch.Tensor, temb: Optional[torch.Tensor] = None) -> torch.Tensor: hidden_states = torch.cat([hidden_states, temb], dim=1) for resnet in self.resnets: hidden_states = resnet(hidden_states) return hidden_states, (hidden_states,) class AttnUpBlock1D(nn.Module): def __init__(self, in_channels: int, out_channels: int, mid_channels: Optional[int] = None): super().__init__() mid_channels = out_channels if mid_channels is None else mid_channels resnets = [ ResConvBlock(2 * in_channels, mid_channels, mid_channels), ResConvBlock(mid_channels, mid_channels, mid_channels), ResConvBlock(mid_channels, mid_channels, out_channels), ] attentions = [ SelfAttention1d(mid_channels, mid_channels // 32), SelfAttention1d(mid_channels, mid_channels // 32), SelfAttention1d(out_channels, out_channels // 32), ] self.attentions = nn.ModuleList(attentions) self.resnets = nn.ModuleList(resnets) self.up = Upsample1d(kernel="cubic") def forward( self, hidden_states: torch.Tensor, res_hidden_states_tuple: Tuple[torch.Tensor, ...], temb: Optional[torch.Tensor] = None, ) -> torch.Tensor: res_hidden_states = res_hidden_states_tuple[-1] hidden_states = 
torch.cat([hidden_states, res_hidden_states], dim=1) for resnet, attn in zip(self.resnets, self.attentions): hidden_states = resnet(hidden_states) hidden_states = attn(hidden_states) hidden_states = self.up(hidden_states) return hidden_states class UpBlock1D(nn.Module): def __init__(self, in_channels: int, out_channels: int, mid_channels: Optional[int] = None): super().__init__() mid_channels = in_channels if mid_channels is None else mid_channels resnets = [ ResConvBlock(2 * in_channels, mid_channels, mid_channels), ResConvBlock(mid_channels, mid_channels, mid_channels), ResConvBlock(mid_channels, mid_channels, out_channels), ] self.resnets = nn.ModuleList(resnets) self.up = Upsample1d(kernel="cubic") def forward( self, hidden_states: torch.Tensor, res_hidden_states_tuple: Tuple[torch.Tensor, ...], temb: Optional[torch.Tensor] = None, ) -> torch.Tensor: res_hidden_states = res_hidden_states_tuple[-1] hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1) for resnet in self.resnets: hidden_states = resnet(hidden_states) hidden_states = self.up(hidden_states) return hidden_states class UpBlock1DNoSkip(nn.Module): def __init__(self, in_channels: int, out_channels: int, mid_channels: Optional[int] = None): super().__init__() mid_channels = in_channels if mid_channels is None else mid_channels resnets = [ ResConvBlock(2 * in_channels, mid_channels, mid_channels), ResConvBlock(mid_channels, mid_channels, mid_channels), ResConvBlock(mid_channels, mid_channels, out_channels, is_last=True), ] self.resnets = nn.ModuleList(resnets) def forward( self, hidden_states: torch.Tensor, res_hidden_states_tuple: Tuple[torch.Tensor, ...], temb: Optional[torch.Tensor] = None, ) -> torch.Tensor: res_hidden_states = res_hidden_states_tuple[-1] hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1) for resnet in self.resnets: hidden_states = resnet(hidden_states) return hidden_states DownBlockType = Union[DownResnetBlock1D, DownBlock1D, AttnDownBlock1D, DownBlock1DNoSkip] MidBlockType = Union[MidResTemporalBlock1D, ValueFunctionMidBlock1D, UNetMidBlock1D] OutBlockType = Union[OutConv1DBlock, OutValueFunctionBlock] UpBlockType = Union[UpResnetBlock1D, UpBlock1D, AttnUpBlock1D, UpBlock1DNoSkip] def get_down_block( down_block_type: str, num_layers: int, in_channels: int, out_channels: int, temb_channels: int, add_downsample: bool, ) -> DownBlockType: if down_block_type == "DownResnetBlock1D": return DownResnetBlock1D( in_channels=in_channels, num_layers=num_layers, out_channels=out_channels, temb_channels=temb_channels, add_downsample=add_downsample, ) elif down_block_type == "DownBlock1D": return DownBlock1D(out_channels=out_channels, in_channels=in_channels) elif down_block_type == "AttnDownBlock1D": return AttnDownBlock1D(out_channels=out_channels, in_channels=in_channels) elif down_block_type == "DownBlock1DNoSkip": return DownBlock1DNoSkip(out_channels=out_channels, in_channels=in_channels) raise ValueError(f"{down_block_type} does not exist.") def get_up_block( up_block_type: str, num_layers: int, in_channels: int, out_channels: int, temb_channels: int, add_upsample: bool ) -> UpBlockType: if up_block_type == "UpResnetBlock1D": return UpResnetBlock1D( in_channels=in_channels, num_layers=num_layers, out_channels=out_channels, temb_channels=temb_channels, add_upsample=add_upsample, ) elif up_block_type == "UpBlock1D": return UpBlock1D(in_channels=in_channels, out_channels=out_channels) elif up_block_type == "AttnUpBlock1D": return AttnUpBlock1D(in_channels=in_channels, 
out_channels=out_channels) elif up_block_type == "UpBlock1DNoSkip": return UpBlock1DNoSkip(in_channels=in_channels, out_channels=out_channels) raise ValueError(f"{up_block_type} does not exist.") def get_mid_block( mid_block_type: str, num_layers: int, in_channels: int, mid_channels: int, out_channels: int, embed_dim: int, add_downsample: bool, ) -> MidBlockType: if mid_block_type == "MidResTemporalBlock1D": return MidResTemporalBlock1D( num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, embed_dim=embed_dim, add_downsample=add_downsample, ) elif mid_block_type == "ValueFunctionMidBlock1D": return ValueFunctionMidBlock1D(in_channels=in_channels, out_channels=out_channels, embed_dim=embed_dim) elif mid_block_type == "UNetMidBlock1D": return UNetMidBlock1D(in_channels=in_channels, mid_channels=mid_channels, out_channels=out_channels) raise ValueError(f"{mid_block_type} does not exist.") def get_out_block( *, out_block_type: str, num_groups_out: int, embed_dim: int, out_channels: int, act_fn: str, fc_dim: int ) -> Optional[OutBlockType]: if out_block_type == "OutConv1DBlock": return OutConv1DBlock(num_groups_out, out_channels, embed_dim, act_fn) elif out_block_type == "ValueFunction": return OutValueFunctionBlock(fc_dim, embed_dim, act_fn) return None
diffusers/src/diffusers/models/unets/unet_1d_blocks.py/0
{ "file_path": "diffusers/src/diffusers/models/unets/unet_1d_blocks.py", "repo_id": "diffusers", "token_count": 11970 }
166
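A minimal usage sketch for the 1D UNet blocks in the file above. The shapes, channel counts, and variable names here are illustrative assumptions rather than values taken from the file; the intent is only to show that `DownBlock1D` halves the temporal length with the fixed "cubic" resampling kernel and that `UpBlock1D` restores it from the returned skip connection.

import torch
from diffusers.models.unets.unet_1d_blocks import DownBlock1D, UpBlock1D

# Illustrative input: batch of 1, 16 channels, sequence length 64.
x = torch.randn(1, 16, 64)

down = DownBlock1D(out_channels=32, in_channels=16)
h, skips = down(x)  # cubic downsample + ResConvBlocks: h has shape (1, 32, 32)

up = UpBlock1D(in_channels=32, out_channels=16)
y = up(h, res_hidden_states_tuple=skips)  # concatenates the skip, upsamples back to (1, 16, 64)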
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from ..utils import deprecate
from .autoencoders.vq_model import VQEncoderOutput, VQModel


class VQEncoderOutput(VQEncoderOutput):
    def __init__(self, *args, **kwargs):
        deprecation_message = "Importing `VQEncoderOutput` from `diffusers.models.vq_model` is deprecated and this will be removed in a future version. Please use `from diffusers.models.autoencoders.vq_model import VQEncoderOutput`, instead."
        deprecate("VQEncoderOutput", "0.31", deprecation_message)
        super().__init__(*args, **kwargs)


class VQModel(VQModel):
    def __init__(self, *args, **kwargs):
        deprecation_message = "Importing `VQModel` from `diffusers.models.vq_model` is deprecated and this will be removed in a future version. Please use `from diffusers.models.autoencoders.vq_model import VQModel`, instead."
        deprecate("VQModel", "0.31", deprecation_message)
        super().__init__(*args, **kwargs)
diffusers/src/diffusers/models/vq_model.py/0
{ "file_path": "diffusers/src/diffusers/models/vq_model.py", "repo_id": "diffusers", "token_count": 490 }
167
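The module above is only a deprecation shim: both import paths resolve to the same classes, but the old one emits the deprecation message shown in the file. A small sketch of the migration it asks for:

# Old location (still works, but triggers the deprecation warning above):
from diffusers.models.vq_model import VQModel

# New location, as recommended by the deprecation message:
from diffusers.models.autoencoders.vq_model import VQEncoderOutput, VQModel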
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect from typing import Any, List, Optional, Tuple import torch from ...configuration_utils import FrozenDict from ...guiders import ClassifierFreeGuidance from ...models import ControlNetModel, UNet2DConditionModel from ...schedulers import EulerDiscreteScheduler from ...utils import logging from ..modular_pipeline import ( BlockState, LoopSequentialPipelineBlocks, ModularPipelineBlocks, PipelineState, ) from ..modular_pipeline_utils import ComponentSpec, InputParam, OutputParam from .modular_pipeline import StableDiffusionXLModularPipeline logger = logging.get_logger(__name__) # pylint: disable=invalid-name # YiYi experimenting composible denoise loop # loop step (1): prepare latent input for denoiser class StableDiffusionXLLoopBeforeDenoiser(ModularPipelineBlocks): model_name = "stable-diffusion-xl" @property def expected_components(self) -> List[ComponentSpec]: return [ ComponentSpec("scheduler", EulerDiscreteScheduler), ] @property def description(self) -> str: return ( "step within the denoising loop that prepare the latent input for the denoiser. " "This block should be used to compose the `sub_blocks` attribute of a `LoopSequentialPipelineBlocks` " "object (e.g. `StableDiffusionXLDenoiseLoopWrapper`)" ) @property def inputs(self) -> List[str]: return [ InputParam( "latents", required=True, type_hint=torch.Tensor, description="The initial latents to use for the denoising process. Can be generated in prepare_latent step.", ), ] @torch.no_grad() def __call__(self, components: StableDiffusionXLModularPipeline, block_state: BlockState, i: int, t: int): block_state.scaled_latents = components.scheduler.scale_model_input(block_state.latents, t) return components, block_state # loop step (1): prepare latent input for denoiser (with inpainting) class StableDiffusionXLInpaintLoopBeforeDenoiser(ModularPipelineBlocks): model_name = "stable-diffusion-xl" @property def expected_components(self) -> List[ComponentSpec]: return [ ComponentSpec("scheduler", EulerDiscreteScheduler), ComponentSpec("unet", UNet2DConditionModel), ] @property def description(self) -> str: return ( "step within the denoising loop that prepare the latent input for the denoiser (for inpainting workflow only). " "This block should be used to compose the `sub_blocks` attribute of a `LoopSequentialPipelineBlocks` object" ) @property def inputs(self) -> List[str]: return [ InputParam( "latents", required=True, type_hint=torch.Tensor, description="The initial latents to use for the denoising process. Can be generated in prepare_latent step.", ), InputParam( "mask", type_hint=Optional[torch.Tensor], description="The mask to use for the denoising process, for inpainting task only. Can be generated in vae_encode or prepare_latent step.", ), InputParam( "masked_image_latents", type_hint=Optional[torch.Tensor], description="The masked image latents to use for the denoising process, for inpainting task only. 
Can be generated in vae_encode or prepare_latent step.", ), ] @staticmethod def check_inputs(components, block_state): num_channels_unet = components.num_channels_unet if num_channels_unet == 9: # default case for runwayml/stable-diffusion-inpainting if block_state.mask is None or block_state.masked_image_latents is None: raise ValueError("mask and masked_image_latents must be provided for inpainting-specific Unet") num_channels_latents = block_state.latents.shape[1] num_channels_mask = block_state.mask.shape[1] num_channels_masked_image = block_state.masked_image_latents.shape[1] if num_channels_latents + num_channels_mask + num_channels_masked_image != num_channels_unet: raise ValueError( f"Incorrect configuration settings! The config of `components.unet`: {components.unet.config} expects" f" {components.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} +" f" `num_channels_mask`: {num_channels_mask} + `num_channels_masked_image`: {num_channels_masked_image}" f" = {num_channels_latents + num_channels_masked_image + num_channels_mask}. Please verify the config of" " `components.unet` or your `mask_image` or `image` input." ) @torch.no_grad() def __call__(self, components: StableDiffusionXLModularPipeline, block_state: BlockState, i: int, t: int): self.check_inputs(components, block_state) block_state.scaled_latents = components.scheduler.scale_model_input(block_state.latents, t) if components.num_channels_unet == 9: block_state.scaled_latents = torch.cat( [block_state.scaled_latents, block_state.mask, block_state.masked_image_latents], dim=1 ) return components, block_state # loop step (2): denoise the latents with guidance class StableDiffusionXLLoopDenoiser(ModularPipelineBlocks): model_name = "stable-diffusion-xl" @property def expected_components(self) -> List[ComponentSpec]: return [ ComponentSpec( "guider", ClassifierFreeGuidance, config=FrozenDict({"guidance_scale": 7.5}), default_creation_method="from_config", ), ComponentSpec("unet", UNet2DConditionModel), ] @property def description(self) -> str: return ( "Step within the denoising loop that denoise the latents with guidance. " "This block should be used to compose the `sub_blocks` attribute of a `LoopSequentialPipelineBlocks` " "object (e.g. `StableDiffusionXLDenoiseLoopWrapper`)" ) @property def inputs(self) -> List[Tuple[str, Any]]: return [ InputParam("cross_attention_kwargs"), InputParam( "num_inference_steps", required=True, type_hint=int, description="The number of inference steps to use for the denoising process. Can be generated in set_timesteps step.", ), InputParam( "timestep_cond", type_hint=Optional[torch.Tensor], description="The guidance scale embedding to use for Latent Consistency Models(LCMs). Can be generated in prepare_additional_conditioning step.", ), InputParam( kwargs_type="guider_input_fields", description=( "All conditional model inputs that need to be prepared with guider. " "It should contain prompt_embeds/negative_prompt_embeds, " "add_time_ids/negative_add_time_ids, " "pooled_prompt_embeds/negative_pooled_prompt_embeds, " "and ip_adapter_embeds/negative_ip_adapter_embeds (optional)." "please add `kwargs_type=guider_input_fields` to their parameter spec (`OutputParam`) when they are created and added to the pipeline state" ), ), ] @torch.no_grad() def __call__( self, components: StableDiffusionXLModularPipeline, block_state: BlockState, i: int, t: int ) -> PipelineState: # Map the keys we'll see on each `guider_state_batch` (e.g. 
guider_state_batch.prompt_embeds) # to the corresponding (cond, uncond) fields on block_state. (e.g. block_state.prompt_embeds, block_state.negative_prompt_embeds) guider_input_fields = { "prompt_embeds": ("prompt_embeds", "negative_prompt_embeds"), "time_ids": ("add_time_ids", "negative_add_time_ids"), "text_embeds": ("pooled_prompt_embeds", "negative_pooled_prompt_embeds"), "image_embeds": ("ip_adapter_embeds", "negative_ip_adapter_embeds"), } components.guider.set_state(step=i, num_inference_steps=block_state.num_inference_steps, timestep=t) # Prepare mini‐batches according to guidance method and `guider_input_fields` # Each guider_state_batch will have .prompt_embeds, .time_ids, text_embeds, image_embeds. # e.g. for CFG, we prepare two batches: one for uncond, one for cond # for first batch, guider_state_batch.prompt_embeds correspond to block_state.prompt_embeds # for second batch, guider_state_batch.prompt_embeds correspond to block_state.negative_prompt_embeds guider_state = components.guider.prepare_inputs(block_state, guider_input_fields) # run the denoiser for each guidance batch for guider_state_batch in guider_state: components.guider.prepare_models(components.unet) cond_kwargs = guider_state_batch.as_dict() cond_kwargs = {k: v for k, v in cond_kwargs.items() if k in guider_input_fields} prompt_embeds = cond_kwargs.pop("prompt_embeds") # Predict the noise residual # store the noise_pred in guider_state_batch so that we can apply guidance across all batches guider_state_batch.noise_pred = components.unet( block_state.scaled_latents, t, encoder_hidden_states=prompt_embeds, timestep_cond=block_state.timestep_cond, cross_attention_kwargs=block_state.cross_attention_kwargs, added_cond_kwargs=cond_kwargs, return_dict=False, )[0] components.guider.cleanup_models(components.unet) # Perform guidance block_state.noise_pred, block_state.scheduler_step_kwargs = components.guider(guider_state) return components, block_state # loop step (2): denoise the latents with guidance (with controlnet) class StableDiffusionXLControlNetLoopDenoiser(ModularPipelineBlocks): model_name = "stable-diffusion-xl" @property def expected_components(self) -> List[ComponentSpec]: return [ ComponentSpec( "guider", ClassifierFreeGuidance, config=FrozenDict({"guidance_scale": 7.5}), default_creation_method="from_config", ), ComponentSpec("unet", UNet2DConditionModel), ComponentSpec("controlnet", ControlNetModel), ] @property def description(self) -> str: return ( "step within the denoising loop that denoise the latents with guidance (with controlnet). " "This block should be used to compose the `sub_blocks` attribute of a `LoopSequentialPipelineBlocks` " "object (e.g. `StableDiffusionXLDenoiseLoopWrapper`)" ) @property def inputs(self) -> List[Tuple[str, Any]]: return [ InputParam("cross_attention_kwargs"), InputParam( "controlnet_cond", required=True, type_hint=torch.Tensor, description="The control image to use for the denoising process. Can be generated in prepare_controlnet_inputs step.", ), InputParam( "conditioning_scale", type_hint=float, description="The controlnet conditioning scale value to use for the denoising process. Can be generated in prepare_controlnet_inputs step.", ), InputParam( "guess_mode", required=True, type_hint=bool, description="The guess mode value to use for the denoising process. Can be generated in prepare_controlnet_inputs step.", ), InputParam( "controlnet_keep", required=True, type_hint=List[float], description="The controlnet keep values to use for the denoising process. 
Can be generated in prepare_controlnet_inputs step.", ), InputParam( "timestep_cond", type_hint=Optional[torch.Tensor], description="The guidance scale embedding to use for Latent Consistency Models(LCMs), can be generated by prepare_additional_conditioning step", ), InputParam( "num_inference_steps", required=True, type_hint=int, description="The number of inference steps to use for the denoising process. Can be generated in set_timesteps step.", ), InputParam( kwargs_type="guider_input_fields", description=( "All conditional model inputs that need to be prepared with guider. " "It should contain prompt_embeds/negative_prompt_embeds, " "add_time_ids/negative_add_time_ids, " "pooled_prompt_embeds/negative_pooled_prompt_embeds, " "and ip_adapter_embeds/negative_ip_adapter_embeds (optional)." "please add `kwargs_type=guider_input_fields` to their parameter spec (`OutputParam`) when they are created and added to the pipeline state" ), ), InputParam( kwargs_type="controlnet_kwargs", description=( "additional kwargs for controlnet (e.g. control_type_idx and control_type from the controlnet union input step )" "please add `kwargs_type=controlnet_kwargs` to their parameter spec (`OutputParam`) when they are created and added to the pipeline state" ), ), ] @staticmethod def prepare_extra_kwargs(func, exclude_kwargs=[], **kwargs): accepted_kwargs = set(inspect.signature(func).parameters.keys()) extra_kwargs = {} for key, value in kwargs.items(): if key in accepted_kwargs and key not in exclude_kwargs: extra_kwargs[key] = value return extra_kwargs @torch.no_grad() def __call__(self, components: StableDiffusionXLModularPipeline, block_state: BlockState, i: int, t: int): extra_controlnet_kwargs = self.prepare_extra_kwargs( components.controlnet.forward, **block_state.controlnet_kwargs ) # Map the keys we'll see on each `guider_state_batch` (e.g. guider_state_batch.prompt_embeds) # to the corresponding (cond, uncond) fields on block_state. (e.g. block_state.prompt_embeds, block_state.negative_prompt_embeds) guider_input_fields = { "prompt_embeds": ("prompt_embeds", "negative_prompt_embeds"), "time_ids": ("add_time_ids", "negative_add_time_ids"), "text_embeds": ("pooled_prompt_embeds", "negative_pooled_prompt_embeds"), "image_embeds": ("ip_adapter_embeds", "negative_ip_adapter_embeds"), } # cond_scale for the timestep (controlnet input) if isinstance(block_state.controlnet_keep[i], list): block_state.cond_scale = [ c * s for c, s in zip(block_state.conditioning_scale, block_state.controlnet_keep[i]) ] else: controlnet_cond_scale = block_state.conditioning_scale if isinstance(controlnet_cond_scale, list): controlnet_cond_scale = controlnet_cond_scale[0] block_state.cond_scale = controlnet_cond_scale * block_state.controlnet_keep[i] # default controlnet output/unet input for guess mode + conditional path block_state.down_block_res_samples_zeros = None block_state.mid_block_res_sample_zeros = None # guided denoiser step components.guider.set_state(step=i, num_inference_steps=block_state.num_inference_steps, timestep=t) # Prepare mini‐batches according to guidance method and `guider_input_fields` # Each guider_state_batch will have .prompt_embeds, .time_ids, text_embeds, image_embeds. # e.g. 
for CFG, we prepare two batches: one for uncond, one for cond # for first batch, guider_state_batch.prompt_embeds correspond to block_state.prompt_embeds # for second batch, guider_state_batch.prompt_embeds correspond to block_state.negative_prompt_embeds guider_state = components.guider.prepare_inputs(block_state, guider_input_fields) # run the denoiser for each guidance batch for guider_state_batch in guider_state: components.guider.prepare_models(components.unet) # Prepare additional conditionings added_cond_kwargs = { "text_embeds": guider_state_batch.text_embeds, "time_ids": guider_state_batch.time_ids, } if hasattr(guider_state_batch, "image_embeds") and guider_state_batch.image_embeds is not None: added_cond_kwargs["image_embeds"] = guider_state_batch.image_embeds # Prepare controlnet additional conditionings controlnet_added_cond_kwargs = { "text_embeds": guider_state_batch.text_embeds, "time_ids": guider_state_batch.time_ids, } # run controlnet for the guidance batch if block_state.guess_mode and not components.guider.is_conditional: # guider always run uncond batch first, so these tensors should be set already down_block_res_samples = block_state.down_block_res_samples_zeros mid_block_res_sample = block_state.mid_block_res_sample_zeros else: down_block_res_samples, mid_block_res_sample = components.controlnet( block_state.scaled_latents, t, encoder_hidden_states=guider_state_batch.prompt_embeds, controlnet_cond=block_state.controlnet_cond, conditioning_scale=block_state.cond_scale, guess_mode=block_state.guess_mode, added_cond_kwargs=controlnet_added_cond_kwargs, return_dict=False, **extra_controlnet_kwargs, ) # assign it to block_state so it will be available for the uncond guidance batch if block_state.down_block_res_samples_zeros is None: block_state.down_block_res_samples_zeros = [torch.zeros_like(d) for d in down_block_res_samples] if block_state.mid_block_res_sample_zeros is None: block_state.mid_block_res_sample_zeros = torch.zeros_like(mid_block_res_sample) # Predict the noise # store the noise_pred in guider_state_batch so we can apply guidance across all batches guider_state_batch.noise_pred = components.unet( block_state.scaled_latents, t, encoder_hidden_states=guider_state_batch.prompt_embeds, timestep_cond=block_state.timestep_cond, cross_attention_kwargs=block_state.cross_attention_kwargs, added_cond_kwargs=added_cond_kwargs, down_block_additional_residuals=down_block_res_samples, mid_block_additional_residual=mid_block_res_sample, return_dict=False, )[0] components.guider.cleanup_models(components.unet) # Perform guidance block_state.noise_pred, block_state.scheduler_step_kwargs = components.guider(guider_state) return components, block_state # loop step (3): scheduler step to update latents class StableDiffusionXLLoopAfterDenoiser(ModularPipelineBlocks): model_name = "stable-diffusion-xl" @property def expected_components(self) -> List[ComponentSpec]: return [ ComponentSpec("scheduler", EulerDiscreteScheduler), ] @property def description(self) -> str: return ( "step within the denoising loop that update the latents. " "This block should be used to compose the `sub_blocks` attribute of a `LoopSequentialPipelineBlocks` " "object (e.g. 
`StableDiffusionXLDenoiseLoopWrapper`)" ) @property def inputs(self) -> List[Tuple[str, Any]]: return [ InputParam("eta", default=0.0), InputParam("generator"), ] @property def intermediate_outputs(self) -> List[OutputParam]: return [OutputParam("latents", type_hint=torch.Tensor, description="The denoised latents")] # YiYi TODO: move this out of here @staticmethod def prepare_extra_kwargs(func, exclude_kwargs=[], **kwargs): accepted_kwargs = set(inspect.signature(func).parameters.keys()) extra_kwargs = {} for key, value in kwargs.items(): if key in accepted_kwargs and key not in exclude_kwargs: extra_kwargs[key] = value return extra_kwargs @torch.no_grad() def __call__(self, components: StableDiffusionXLModularPipeline, block_state: BlockState, i: int, t: int): # Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline block_state.extra_step_kwargs = self.prepare_extra_kwargs( components.scheduler.step, generator=block_state.generator, eta=block_state.eta ) # Perform scheduler step using the predicted output block_state.latents_dtype = block_state.latents.dtype block_state.latents = components.scheduler.step( block_state.noise_pred, t, block_state.latents, **block_state.extra_step_kwargs, **block_state.scheduler_step_kwargs, return_dict=False, )[0] if block_state.latents.dtype != block_state.latents_dtype: if torch.backends.mps.is_available(): # some platforms (eg. apple mps) misbehave due to a pytorch bug: https://github.com/pytorch/pytorch/pull/99272 block_state.latents = block_state.latents.to(block_state.latents_dtype) return components, block_state # loop step (3): scheduler step to update latents (with inpainting) class StableDiffusionXLInpaintLoopAfterDenoiser(ModularPipelineBlocks): model_name = "stable-diffusion-xl" @property def expected_components(self) -> List[ComponentSpec]: return [ ComponentSpec("scheduler", EulerDiscreteScheduler), ComponentSpec("unet", UNet2DConditionModel), ] @property def description(self) -> str: return ( "step within the denoising loop that update the latents (for inpainting workflow only). " "This block should be used to compose the `sub_blocks` attribute of a `LoopSequentialPipelineBlocks` " "object (e.g. `StableDiffusionXLDenoiseLoopWrapper`)" ) @property def inputs(self) -> List[Tuple[str, Any]]: return [ InputParam("eta", default=0.0), InputParam("generator"), InputParam( "timesteps", required=True, type_hint=torch.Tensor, description="The timesteps to use for the denoising process. Can be generated in set_timesteps step.", ), InputParam( "mask", type_hint=Optional[torch.Tensor], description="The mask to use for the denoising process, for inpainting task only. Can be generated in vae_encode or prepare_latent step.", ), InputParam( "noise", type_hint=Optional[torch.Tensor], description="The noise added to the image latents, for inpainting task only. Can be generated in prepare_latent step.", ), InputParam( "image_latents", type_hint=Optional[torch.Tensor], description="The image latents to use for the denoising process, for inpainting/image-to-image task only. 
Can be generated in vae_encode or prepare_latent step.", ), ] @property def intermediate_outputs(self) -> List[OutputParam]: return [OutputParam("latents", type_hint=torch.Tensor, description="The denoised latents")] @staticmethod def prepare_extra_kwargs(func, exclude_kwargs=[], **kwargs): accepted_kwargs = set(inspect.signature(func).parameters.keys()) extra_kwargs = {} for key, value in kwargs.items(): if key in accepted_kwargs and key not in exclude_kwargs: extra_kwargs[key] = value return extra_kwargs def check_inputs(self, components, block_state): if components.num_channels_unet == 4: if block_state.image_latents is None: raise ValueError(f"image_latents is required for this step {self.__class__.__name__}") if block_state.mask is None: raise ValueError(f"mask is required for this step {self.__class__.__name__}") if block_state.noise is None: raise ValueError(f"noise is required for this step {self.__class__.__name__}") @torch.no_grad() def __call__(self, components: StableDiffusionXLModularPipeline, block_state: BlockState, i: int, t: int): self.check_inputs(components, block_state) # Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline block_state.extra_step_kwargs = self.prepare_extra_kwargs( components.scheduler.step, generator=block_state.generator, eta=block_state.eta ) # Perform scheduler step using the predicted output block_state.latents_dtype = block_state.latents.dtype block_state.latents = components.scheduler.step( block_state.noise_pred, t, block_state.latents, **block_state.extra_step_kwargs, **block_state.scheduler_step_kwargs, return_dict=False, )[0] if block_state.latents.dtype != block_state.latents_dtype: if torch.backends.mps.is_available(): # some platforms (eg. apple mps) misbehave due to a pytorch bug: https://github.com/pytorch/pytorch/pull/99272 block_state.latents = block_state.latents.to(block_state.latents_dtype) # adjust latent for inpainting if components.num_channels_unet == 4: block_state.init_latents_proper = block_state.image_latents if i < len(block_state.timesteps) - 1: block_state.noise_timestep = block_state.timesteps[i + 1] block_state.init_latents_proper = components.scheduler.add_noise( block_state.init_latents_proper, block_state.noise, torch.tensor([block_state.noise_timestep]) ) block_state.latents = ( 1 - block_state.mask ) * block_state.init_latents_proper + block_state.mask * block_state.latents return components, block_state # the loop wrapper that iterates over the timesteps class StableDiffusionXLDenoiseLoopWrapper(LoopSequentialPipelineBlocks): model_name = "stable-diffusion-xl" @property def description(self) -> str: return ( "Pipeline block that iteratively denoise the latents over `timesteps`. " "The specific steps with each iteration can be customized with `sub_blocks` attributes" ) @property def loop_expected_components(self) -> List[ComponentSpec]: return [ ComponentSpec( "guider", ClassifierFreeGuidance, config=FrozenDict({"guidance_scale": 7.5}), default_creation_method="from_config", ), ComponentSpec("scheduler", EulerDiscreteScheduler), ComponentSpec("unet", UNet2DConditionModel), ] @property def loop_inputs(self) -> List[InputParam]: return [ InputParam( "timesteps", required=True, type_hint=torch.Tensor, description="The timesteps to use for the denoising process. Can be generated in set_timesteps step.", ), InputParam( "num_inference_steps", required=True, type_hint=int, description="The number of inference steps to use for the denoising process. 
Can be generated in set_timesteps step.", ), ] @torch.no_grad() def __call__(self, components: StableDiffusionXLModularPipeline, state: PipelineState) -> PipelineState: block_state = self.get_block_state(state) block_state.disable_guidance = True if components.unet.config.time_cond_proj_dim is not None else False if block_state.disable_guidance: components.guider.disable() else: components.guider.enable() block_state.num_warmup_steps = max( len(block_state.timesteps) - block_state.num_inference_steps * components.scheduler.order, 0 ) with self.progress_bar(total=block_state.num_inference_steps) as progress_bar: for i, t in enumerate(block_state.timesteps): components, block_state = self.loop_step(components, block_state, i=i, t=t) if i == len(block_state.timesteps) - 1 or ( (i + 1) > block_state.num_warmup_steps and (i + 1) % components.scheduler.order == 0 ): progress_bar.update() self.set_block_state(state, block_state) return components, state # composing the denoising loops class StableDiffusionXLDenoiseStep(StableDiffusionXLDenoiseLoopWrapper): block_classes = [ StableDiffusionXLLoopBeforeDenoiser, StableDiffusionXLLoopDenoiser, StableDiffusionXLLoopAfterDenoiser, ] block_names = ["before_denoiser", "denoiser", "after_denoiser"] @property def description(self) -> str: return ( "Denoise step that iteratively denoise the latents. \n" "Its loop logic is defined in `StableDiffusionXLDenoiseLoopWrapper.__call__` method \n" "At each iteration, it runs blocks defined in `sub_blocks` sequencially:\n" " - `StableDiffusionXLLoopBeforeDenoiser`\n" " - `StableDiffusionXLLoopDenoiser`\n" " - `StableDiffusionXLLoopAfterDenoiser`\n" "This block supports both text2img and img2img tasks." ) # control_cond class StableDiffusionXLControlNetDenoiseStep(StableDiffusionXLDenoiseLoopWrapper): block_classes = [ StableDiffusionXLLoopBeforeDenoiser, StableDiffusionXLControlNetLoopDenoiser, StableDiffusionXLLoopAfterDenoiser, ] block_names = ["before_denoiser", "denoiser", "after_denoiser"] @property def description(self) -> str: return ( "Denoise step that iteratively denoise the latents with controlnet. \n" "Its loop logic is defined in `StableDiffusionXLDenoiseLoopWrapper.__call__` method \n" "At each iteration, it runs blocks defined in `sub_blocks` sequencially:\n" " - `StableDiffusionXLLoopBeforeDenoiser`\n" " - `StableDiffusionXLControlNetLoopDenoiser`\n" " - `StableDiffusionXLLoopAfterDenoiser`\n" "This block supports using controlnet for both text2img and img2img tasks." ) # mask class StableDiffusionXLInpaintDenoiseStep(StableDiffusionXLDenoiseLoopWrapper): block_classes = [ StableDiffusionXLInpaintLoopBeforeDenoiser, StableDiffusionXLLoopDenoiser, StableDiffusionXLInpaintLoopAfterDenoiser, ] block_names = ["before_denoiser", "denoiser", "after_denoiser"] @property def description(self) -> str: return ( "Denoise step that iteratively denoise the latents(for inpainting task only). \n" "Its loop logic is defined in `StableDiffusionXLDenoiseLoopWrapper.__call__` method \n" "At each iteration, it runs blocks defined in `sub_blocks` sequencially:\n" " - `StableDiffusionXLInpaintLoopBeforeDenoiser`\n" " - `StableDiffusionXLLoopDenoiser`\n" " - `StableDiffusionXLInpaintLoopAfterDenoiser`\n" "This block onlysupports inpainting tasks." 
) # control_cond + mask class StableDiffusionXLInpaintControlNetDenoiseStep(StableDiffusionXLDenoiseLoopWrapper): block_classes = [ StableDiffusionXLInpaintLoopBeforeDenoiser, StableDiffusionXLControlNetLoopDenoiser, StableDiffusionXLInpaintLoopAfterDenoiser, ] block_names = ["before_denoiser", "denoiser", "after_denoiser"] @property def description(self) -> str: return ( "Denoise step that iteratively denoises the latents (for inpainting task only) with controlnet. \n" "Its loop logic is defined in `StableDiffusionXLDenoiseLoopWrapper.__call__` method \n" "At each iteration, it runs blocks defined in `sub_blocks` sequentially:\n" " - `StableDiffusionXLInpaintLoopBeforeDenoiser`\n" " - `StableDiffusionXLControlNetLoopDenoiser`\n" " - `StableDiffusionXLInpaintLoopAfterDenoiser`\n" "This block only supports using controlnet for inpainting tasks." )
diffusers/src/diffusers/modular_pipelines/stable_diffusion_xl/denoise.py/0
{ "file_path": "diffusers/src/diffusers/modular_pipelines/stable_diffusion_xl/denoise.py", "repo_id": "diffusers", "token_count": 15351 }
168
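The denoise module above builds each preset step by listing interchangeable sub-blocks on a loop wrapper. A hedged sketch of the same composition pattern, assuming the wrapper can be subclassed exactly as the preset classes in the file do; the class name `MyControlNetDenoiseStep` is invented for illustration.

from diffusers.modular_pipelines.stable_diffusion_xl.denoise import (
    StableDiffusionXLControlNetLoopDenoiser,
    StableDiffusionXLDenoiseLoopWrapper,
    StableDiffusionXLLoopAfterDenoiser,
    StableDiffusionXLLoopBeforeDenoiser,
)


# Mirrors StableDiffusionXLControlNetDenoiseStep above: swap the plain denoiser
# for the ControlNet-aware one while keeping the before/after steps unchanged.
class MyControlNetDenoiseStep(StableDiffusionXLDenoiseLoopWrapper):
    block_classes = [
        StableDiffusionXLLoopBeforeDenoiser,
        StableDiffusionXLControlNetLoopDenoiser,
        StableDiffusionXLLoopAfterDenoiser,
    ]
    block_names = ["before_denoiser", "denoiser", "after_denoiser"]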
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from dataclasses import dataclass from typing import Any, Dict, List, Optional, Tuple, Union import torch import torch.nn as nn import torch.utils.checkpoint from ...configuration_utils import ConfigMixin, register_to_config from ...loaders import UNet2DConditionLoadersMixin from ...models.activations import get_activation from ...models.attention_processor import ( ADDED_KV_ATTENTION_PROCESSORS, CROSS_ATTENTION_PROCESSORS, AttentionProcessor, AttnAddedKVProcessor, AttnProcessor, ) from ...models.embeddings import ( TimestepEmbedding, Timesteps, ) from ...models.modeling_utils import ModelMixin from ...models.resnet import Downsample2D, ResnetBlock2D, Upsample2D from ...models.transformers.transformer_2d import Transformer2DModel from ...models.unets.unet_2d_blocks import DownBlock2D, UpBlock2D from ...models.unets.unet_2d_condition import UNet2DConditionOutput from ...utils import BaseOutput, logging logger = logging.get_logger(__name__) # pylint: disable=invalid-name def add_special_tokens(hidden_states, attention_mask, sos_token, eos_token): batch_size = hidden_states.shape[0] if attention_mask is not None: # Add two more steps to attn mask new_attn_mask_step = attention_mask.new_ones((batch_size, 1)) attention_mask = torch.concat([new_attn_mask_step, attention_mask, new_attn_mask_step], dim=-1) # Add the SOS / EOS tokens at the start / end of the sequence respectively sos_token = sos_token.expand(batch_size, 1, -1) eos_token = eos_token.expand(batch_size, 1, -1) hidden_states = torch.concat([sos_token, hidden_states, eos_token], dim=1) return hidden_states, attention_mask @dataclass class AudioLDM2ProjectionModelOutput(BaseOutput): """ Args: Class for AudioLDM2 projection layer's outputs. hidden_states (`torch.Tensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states obtained by linearly projecting the hidden-states for each of the text encoders and subsequently concatenating them together. attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices, formed by concatenating the attention masks for the two text encoders together. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. """ hidden_states: torch.Tensor attention_mask: Optional[torch.LongTensor] = None class AudioLDM2ProjectionModel(ModelMixin, ConfigMixin): """ A simple linear projection model to map two text embeddings to a shared latent space. It also inserts learned embedding vectors at the start and end of each text embedding sequence respectively. Each variable appended with `_1` refers to that corresponding to the second text encoder. Otherwise, it is from the first. Args: text_encoder_dim (`int`): Dimensionality of the text embeddings from the first text encoder (CLAP). 
text_encoder_1_dim (`int`): Dimensionality of the text embeddings from the second text encoder (T5 or VITS). langauge_model_dim (`int`): Dimensionality of the text embeddings from the language model (GPT2). """ @register_to_config def __init__( self, text_encoder_dim, text_encoder_1_dim, langauge_model_dim, use_learned_position_embedding=None, max_seq_length=None, ): super().__init__() # additional projection layers for each text encoder self.projection = nn.Linear(text_encoder_dim, langauge_model_dim) self.projection_1 = nn.Linear(text_encoder_1_dim, langauge_model_dim) # learnable SOS / EOS token embeddings for each text encoder self.sos_embed = nn.Parameter(torch.ones(langauge_model_dim)) self.eos_embed = nn.Parameter(torch.ones(langauge_model_dim)) self.sos_embed_1 = nn.Parameter(torch.ones(langauge_model_dim)) self.eos_embed_1 = nn.Parameter(torch.ones(langauge_model_dim)) self.use_learned_position_embedding = use_learned_position_embedding # learable positional embedding for vits encoder if self.use_learned_position_embedding is not None: self.learnable_positional_embedding = torch.nn.Parameter( torch.zeros((1, text_encoder_1_dim, max_seq_length)) ) def forward( self, hidden_states: Optional[torch.Tensor] = None, hidden_states_1: Optional[torch.Tensor] = None, attention_mask: Optional[torch.LongTensor] = None, attention_mask_1: Optional[torch.LongTensor] = None, ): hidden_states = self.projection(hidden_states) hidden_states, attention_mask = add_special_tokens( hidden_states, attention_mask, sos_token=self.sos_embed, eos_token=self.eos_embed ) # Add positional embedding for Vits hidden state if self.use_learned_position_embedding is not None: hidden_states_1 = (hidden_states_1.permute(0, 2, 1) + self.learnable_positional_embedding).permute(0, 2, 1) hidden_states_1 = self.projection_1(hidden_states_1) hidden_states_1, attention_mask_1 = add_special_tokens( hidden_states_1, attention_mask_1, sos_token=self.sos_embed_1, eos_token=self.eos_embed_1 ) # concatenate clap and t5 text encoding hidden_states = torch.cat([hidden_states, hidden_states_1], dim=1) # concatenate attention masks if attention_mask is None and attention_mask_1 is not None: attention_mask = attention_mask_1.new_ones((hidden_states[:2])) elif attention_mask is not None and attention_mask_1 is None: attention_mask_1 = attention_mask.new_ones((hidden_states_1[:2])) if attention_mask is not None and attention_mask_1 is not None: attention_mask = torch.cat([attention_mask, attention_mask_1], dim=-1) else: attention_mask = None return AudioLDM2ProjectionModelOutput( hidden_states=hidden_states, attention_mask=attention_mask, ) class AudioLDM2UNet2DConditionModel(ModelMixin, ConfigMixin, UNet2DConditionLoadersMixin): r""" A conditional 2D UNet model that takes a noisy sample, conditional state, and a timestep and returns a sample shaped output. Compared to the vanilla [`UNet2DConditionModel`], this variant optionally includes an additional self-attention layer in each Transformer block, as well as multiple cross-attention layers. It also allows for up to two cross-attention embeddings, `encoder_hidden_states` and `encoder_hidden_states_1`. This model inherits from [`ModelMixin`]. Check the superclass documentation for it's generic methods implemented for all models (such as downloading or saving). Parameters: sample_size (`int` or `Tuple[int, int]`, *optional*, defaults to `None`): Height and width of input/output sample. in_channels (`int`, *optional*, defaults to 4): Number of channels in the input sample. 
out_channels (`int`, *optional*, defaults to 4): Number of channels in the output. flip_sin_to_cos (`bool`, *optional*, defaults to `False`): Whether to flip the sin to cos in the time embedding. freq_shift (`int`, *optional*, defaults to 0): The frequency shift to apply to the time embedding. down_block_types (`Tuple[str]`, *optional*, defaults to `("CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D")`): The tuple of downsample blocks to use. mid_block_type (`str`, *optional*, defaults to `"UNetMidBlock2DCrossAttn"`): Block type for middle of UNet, it can only be `UNetMidBlock2DCrossAttn` for AudioLDM2. up_block_types (`Tuple[str]`, *optional*, defaults to `("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")`): The tuple of upsample blocks to use. only_cross_attention (`bool` or `Tuple[bool]`, *optional*, default to `False`): Whether to include self-attention in the basic transformer blocks, see [`~models.attention.BasicTransformerBlock`]. block_out_channels (`Tuple[int]`, *optional*, defaults to `(320, 640, 1280, 1280)`): The tuple of output channels for each block. layers_per_block (`int`, *optional*, defaults to 2): The number of layers per block. downsample_padding (`int`, *optional*, defaults to 1): The padding to use for the downsampling convolution. mid_block_scale_factor (`float`, *optional*, defaults to 1.0): The scale factor to use for the mid block. act_fn (`str`, *optional*, defaults to `"silu"`): The activation function to use. norm_num_groups (`int`, *optional*, defaults to 32): The number of groups to use for the normalization. If `None`, normalization and activation layers is skipped in post-processing. norm_eps (`float`, *optional*, defaults to 1e-5): The epsilon to use for the normalization. cross_attention_dim (`int` or `Tuple[int]`, *optional*, defaults to 1280): The dimension of the cross attention features. transformer_layers_per_block (`int` or `Tuple[int]`, *optional*, defaults to 1): The number of transformer blocks of type [`~models.attention.BasicTransformerBlock`]. Only relevant for [`~models.unet_2d_blocks.CrossAttnDownBlock2D`], [`~models.unet_2d_blocks.CrossAttnUpBlock2D`], [`~models.unet_2d_blocks.UNetMidBlock2DCrossAttn`]. attention_head_dim (`int`, *optional*, defaults to 8): The dimension of the attention heads. num_attention_heads (`int`, *optional*): The number of attention heads. If not defined, defaults to `attention_head_dim` resnet_time_scale_shift (`str`, *optional*, defaults to `"default"`): Time scale shift config for ResNet blocks (see [`~models.resnet.ResnetBlock2D`]). Choose from `default` or `scale_shift`. class_embed_type (`str`, *optional*, defaults to `None`): The type of class embedding to use which is ultimately summed with the time embeddings. Choose from `None`, `"timestep"`, `"identity"`, `"projection"`, or `"simple_projection"`. num_class_embeds (`int`, *optional*, defaults to `None`): Input dimension of the learnable embedding matrix to be projected to `time_embed_dim`, when performing class conditioning with `class_embed_type` equal to `None`. time_embedding_type (`str`, *optional*, defaults to `positional`): The type of position embedding to use for timesteps. Choose from `positional` or `fourier`. time_embedding_dim (`int`, *optional*, defaults to `None`): An optional override for the dimension of the projected time embedding. 
time_embedding_act_fn (`str`, *optional*, defaults to `None`): Optional activation function to use only once on the time embeddings before they are passed to the rest of the UNet. Choose from `silu`, `mish`, `gelu`, and `swish`. timestep_post_act (`str`, *optional*, defaults to `None`): The second activation function to use in timestep embedding. Choose from `silu`, `mish` and `gelu`. time_cond_proj_dim (`int`, *optional*, defaults to `None`): The dimension of `cond_proj` layer in the timestep embedding. conv_in_kernel (`int`, *optional*, default to `3`): The kernel size of `conv_in` layer. conv_out_kernel (`int`, *optional*, default to `3`): The kernel size of `conv_out` layer. projection_class_embeddings_input_dim (`int`, *optional*): The dimension of the `class_labels` input when `class_embed_type="projection"`. Required when `class_embed_type="projection"`. class_embeddings_concat (`bool`, *optional*, defaults to `False`): Whether to concatenate the time embeddings with the class embeddings. """ _supports_gradient_checkpointing = True @register_to_config def __init__( self, sample_size: Optional[int] = None, in_channels: int = 4, out_channels: int = 4, flip_sin_to_cos: bool = True, freq_shift: int = 0, down_block_types: Tuple[str] = ( "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D", ), mid_block_type: Optional[str] = "UNetMidBlock2DCrossAttn", up_block_types: Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D"), only_cross_attention: Union[bool, Tuple[bool]] = False, block_out_channels: Tuple[int] = (320, 640, 1280, 1280), layers_per_block: Union[int, Tuple[int]] = 2, downsample_padding: int = 1, mid_block_scale_factor: float = 1, act_fn: str = "silu", norm_num_groups: Optional[int] = 32, norm_eps: float = 1e-5, cross_attention_dim: Union[int, Tuple[int]] = 1280, transformer_layers_per_block: Union[int, Tuple[int]] = 1, attention_head_dim: Union[int, Tuple[int]] = 8, num_attention_heads: Optional[Union[int, Tuple[int]]] = None, use_linear_projection: bool = False, class_embed_type: Optional[str] = None, num_class_embeds: Optional[int] = None, upcast_attention: bool = False, resnet_time_scale_shift: str = "default", time_embedding_type: str = "positional", time_embedding_dim: Optional[int] = None, time_embedding_act_fn: Optional[str] = None, timestep_post_act: Optional[str] = None, time_cond_proj_dim: Optional[int] = None, conv_in_kernel: int = 3, conv_out_kernel: int = 3, projection_class_embeddings_input_dim: Optional[int] = None, class_embeddings_concat: bool = False, ): super().__init__() self.sample_size = sample_size if num_attention_heads is not None: raise ValueError( "At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19." ) # If `num_attention_heads` is not defined (which is the case for most models) # it will default to `attention_head_dim`. This looks weird upon first reading it and it is. # The reason for this behavior is to correct for incorrectly named variables that were introduced # when this library was created. 
The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131 # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking # which is why we correct for the naming here. num_attention_heads = num_attention_heads or attention_head_dim # Check inputs if len(down_block_types) != len(up_block_types): raise ValueError( f"Must provide the same number of `down_block_types` as `up_block_types`. `down_block_types`: {down_block_types}. `up_block_types`: {up_block_types}." ) if len(block_out_channels) != len(down_block_types): raise ValueError( f"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}." ) if not isinstance(only_cross_attention, bool) and len(only_cross_attention) != len(down_block_types): raise ValueError( f"Must provide the same number of `only_cross_attention` as `down_block_types`. `only_cross_attention`: {only_cross_attention}. `down_block_types`: {down_block_types}." ) if not isinstance(num_attention_heads, int) and len(num_attention_heads) != len(down_block_types): raise ValueError( f"Must provide the same number of `num_attention_heads` as `down_block_types`. `num_attention_heads`: {num_attention_heads}. `down_block_types`: {down_block_types}." ) if not isinstance(attention_head_dim, int) and len(attention_head_dim) != len(down_block_types): raise ValueError( f"Must provide the same number of `attention_head_dim` as `down_block_types`. `attention_head_dim`: {attention_head_dim}. `down_block_types`: {down_block_types}." ) if isinstance(cross_attention_dim, list) and len(cross_attention_dim) != len(down_block_types): raise ValueError( f"Must provide the same number of `cross_attention_dim` as `down_block_types`. `cross_attention_dim`: {cross_attention_dim}. `down_block_types`: {down_block_types}." ) if not isinstance(layers_per_block, int) and len(layers_per_block) != len(down_block_types): raise ValueError( f"Must provide the same number of `layers_per_block` as `down_block_types`. `layers_per_block`: {layers_per_block}. `down_block_types`: {down_block_types}." ) # input conv_in_padding = (conv_in_kernel - 1) // 2 self.conv_in = nn.Conv2d( in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding ) # time if time_embedding_type == "positional": time_embed_dim = time_embedding_dim or block_out_channels[0] * 4 self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift) timestep_input_dim = block_out_channels[0] else: raise ValueError(f"{time_embedding_type} does not exist. 
Please make sure to use `positional`.") self.time_embedding = TimestepEmbedding( timestep_input_dim, time_embed_dim, act_fn=act_fn, post_act_fn=timestep_post_act, cond_proj_dim=time_cond_proj_dim, ) # class embedding if class_embed_type is None and num_class_embeds is not None: self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim) elif class_embed_type == "timestep": self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim, act_fn=act_fn) elif class_embed_type == "identity": self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim) elif class_embed_type == "projection": if projection_class_embeddings_input_dim is None: raise ValueError( "`class_embed_type`: 'projection' requires `projection_class_embeddings_input_dim` be set" ) # The projection `class_embed_type` is the same as the timestep `class_embed_type` except # 1. the `class_labels` inputs are not first converted to sinusoidal embeddings # 2. it projects from an arbitrary input dimension. # # Note that `TimestepEmbedding` is quite general, being mainly linear layers and activations. # When used for embedding actual timesteps, the timesteps are first converted to sinusoidal embeddings. # As a result, `TimestepEmbedding` can be passed arbitrary vectors. self.class_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim) elif class_embed_type == "simple_projection": if projection_class_embeddings_input_dim is None: raise ValueError( "`class_embed_type`: 'simple_projection' requires `projection_class_embeddings_input_dim` be set" ) self.class_embedding = nn.Linear(projection_class_embeddings_input_dim, time_embed_dim) else: self.class_embedding = None if time_embedding_act_fn is None: self.time_embed_act = None else: self.time_embed_act = get_activation(time_embedding_act_fn) self.down_blocks = nn.ModuleList([]) self.up_blocks = nn.ModuleList([]) if isinstance(only_cross_attention, bool): only_cross_attention = [only_cross_attention] * len(down_block_types) if isinstance(num_attention_heads, int): num_attention_heads = (num_attention_heads,) * len(down_block_types) if isinstance(cross_attention_dim, int): cross_attention_dim = (cross_attention_dim,) * len(down_block_types) if isinstance(layers_per_block, int): layers_per_block = [layers_per_block] * len(down_block_types) if isinstance(transformer_layers_per_block, int): transformer_layers_per_block = [transformer_layers_per_block] * len(down_block_types) if class_embeddings_concat: # The time embeddings are concatenated with the class embeddings. 
The dimension of the # time embeddings passed to the down, middle, and up blocks is twice the dimension of the # regular time embeddings blocks_time_embed_dim = time_embed_dim * 2 else: blocks_time_embed_dim = time_embed_dim # down output_channel = block_out_channels[0] for i, down_block_type in enumerate(down_block_types): input_channel = output_channel output_channel = block_out_channels[i] is_final_block = i == len(block_out_channels) - 1 down_block = get_down_block( down_block_type, num_layers=layers_per_block[i], transformer_layers_per_block=transformer_layers_per_block[i], in_channels=input_channel, out_channels=output_channel, temb_channels=blocks_time_embed_dim, add_downsample=not is_final_block, resnet_eps=norm_eps, resnet_act_fn=act_fn, resnet_groups=norm_num_groups, cross_attention_dim=cross_attention_dim[i], num_attention_heads=num_attention_heads[i], downsample_padding=downsample_padding, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention[i], upcast_attention=upcast_attention, resnet_time_scale_shift=resnet_time_scale_shift, ) self.down_blocks.append(down_block) # mid if mid_block_type == "UNetMidBlock2DCrossAttn": self.mid_block = UNetMidBlock2DCrossAttn( transformer_layers_per_block=transformer_layers_per_block[-1], in_channels=block_out_channels[-1], temb_channels=blocks_time_embed_dim, resnet_eps=norm_eps, resnet_act_fn=act_fn, output_scale_factor=mid_block_scale_factor, resnet_time_scale_shift=resnet_time_scale_shift, cross_attention_dim=cross_attention_dim[-1], num_attention_heads=num_attention_heads[-1], resnet_groups=norm_num_groups, use_linear_projection=use_linear_projection, upcast_attention=upcast_attention, ) else: raise ValueError( f"unknown mid_block_type : {mid_block_type}. Should be `UNetMidBlock2DCrossAttn` for AudioLDM2." 
) # count how many layers upsample the images self.num_upsamplers = 0 # up reversed_block_out_channels = list(reversed(block_out_channels)) reversed_num_attention_heads = list(reversed(num_attention_heads)) reversed_layers_per_block = list(reversed(layers_per_block)) reversed_cross_attention_dim = list(reversed(cross_attention_dim)) reversed_transformer_layers_per_block = list(reversed(transformer_layers_per_block)) only_cross_attention = list(reversed(only_cross_attention)) output_channel = reversed_block_out_channels[0] for i, up_block_type in enumerate(up_block_types): is_final_block = i == len(block_out_channels) - 1 prev_output_channel = output_channel output_channel = reversed_block_out_channels[i] input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)] # add upsample block for all BUT final layer if not is_final_block: add_upsample = True self.num_upsamplers += 1 else: add_upsample = False up_block = get_up_block( up_block_type, num_layers=reversed_layers_per_block[i] + 1, transformer_layers_per_block=reversed_transformer_layers_per_block[i], in_channels=input_channel, out_channels=output_channel, prev_output_channel=prev_output_channel, temb_channels=blocks_time_embed_dim, add_upsample=add_upsample, resnet_eps=norm_eps, resnet_act_fn=act_fn, resnet_groups=norm_num_groups, cross_attention_dim=reversed_cross_attention_dim[i], num_attention_heads=reversed_num_attention_heads[i], use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention[i], upcast_attention=upcast_attention, resnet_time_scale_shift=resnet_time_scale_shift, ) self.up_blocks.append(up_block) prev_output_channel = output_channel # out if norm_num_groups is not None: self.conv_norm_out = nn.GroupNorm( num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps ) self.conv_act = get_activation(act_fn) else: self.conv_norm_out = None self.conv_act = None conv_out_padding = (conv_out_kernel - 1) // 2 self.conv_out = nn.Conv2d( block_out_channels[0], out_channels, kernel_size=conv_out_kernel, padding=conv_out_padding ) @property # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.attn_processors def attn_processors(self) -> Dict[str, AttentionProcessor]: r""" Returns: `dict` of attention processors: A dictionary containing all attention processors used in the model with indexed by its weight name. """ # set recursively processors = {} def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]): if hasattr(module, "get_processor"): processors[f"{name}.processor"] = module.get_processor() for sub_name, child in module.named_children(): fn_recursive_add_processors(f"{name}.{sub_name}", child, processors) return processors for name, module in self.named_children(): fn_recursive_add_processors(name, module, processors) return processors # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attn_processor def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]): r""" Sets the attention processor to use to compute attention. Parameters: processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`): The instantiated processor class or a dictionary of processor classes that will be set as the processor for **all** `Attention` layers. If `processor` is a dict, the key needs to define the path to the corresponding cross attention processor. This is strongly recommended when setting trainable attention processors. 
""" count = len(self.attn_processors.keys()) if isinstance(processor, dict) and len(processor) != count: raise ValueError( f"A dict of processors was passed, but the number of processors {len(processor)} does not match the" f" number of attention layers: {count}. Please make sure to pass {count} processor classes." ) def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor): if hasattr(module, "set_processor"): if not isinstance(processor, dict): module.set_processor(processor) else: module.set_processor(processor.pop(f"{name}.processor")) for sub_name, child in module.named_children(): fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor) for name, module in self.named_children(): fn_recursive_attn_processor(name, module, processor) # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_default_attn_processor def set_default_attn_processor(self): """ Disables custom attention processors and sets the default attention implementation. """ if all(proc.__class__ in ADDED_KV_ATTENTION_PROCESSORS for proc in self.attn_processors.values()): processor = AttnAddedKVProcessor() elif all(proc.__class__ in CROSS_ATTENTION_PROCESSORS for proc in self.attn_processors.values()): processor = AttnProcessor() else: raise ValueError( f"Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}" ) self.set_attn_processor(processor) # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attention_slice def set_attention_slice(self, slice_size): r""" Enable sliced attention computation. When this option is enabled, the attention module splits the input tensor in slices to compute attention in several steps. This is useful for saving some memory in exchange for a small decrease in speed. Args: slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `"auto"`): When `"auto"`, input to the attention heads is halved, so attention is computed in two steps. If `"max"`, maximum amount of memory is saved by running only one slice at a time. If a number is provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim` must be a multiple of `slice_size`. """ sliceable_head_dims = [] def fn_recursive_retrieve_sliceable_dims(module: torch.nn.Module): if hasattr(module, "set_attention_slice"): sliceable_head_dims.append(module.sliceable_head_dim) for child in module.children(): fn_recursive_retrieve_sliceable_dims(child) # retrieve number of attention layers for module in self.children(): fn_recursive_retrieve_sliceable_dims(module) num_sliceable_layers = len(sliceable_head_dims) if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory slice_size = [dim // 2 for dim in sliceable_head_dims] elif slice_size == "max": # make smallest slice possible slice_size = num_sliceable_layers * [1] slice_size = num_sliceable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size if len(slice_size) != len(sliceable_head_dims): raise ValueError( f"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different" f" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}." ) for i in range(len(slice_size)): size = slice_size[i] dim = sliceable_head_dims[i] if size is not None and size > dim: raise ValueError(f"size {size} has to be smaller or equal to {dim}.") # Recursively walk through all the children. 
# Any children which exposes the set_attention_slice method # gets the message def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]): if hasattr(module, "set_attention_slice"): module.set_attention_slice(slice_size.pop()) for child in module.children(): fn_recursive_set_attention_slice(child, slice_size) reversed_slice_size = list(reversed(slice_size)) for module in self.children(): fn_recursive_set_attention_slice(module, reversed_slice_size) def forward( self, sample: torch.Tensor, timestep: Union[torch.Tensor, float, int], encoder_hidden_states: torch.Tensor, class_labels: Optional[torch.Tensor] = None, timestep_cond: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, cross_attention_kwargs: Optional[Dict[str, Any]] = None, encoder_attention_mask: Optional[torch.Tensor] = None, return_dict: bool = True, encoder_hidden_states_1: Optional[torch.Tensor] = None, encoder_attention_mask_1: Optional[torch.Tensor] = None, ) -> Union[UNet2DConditionOutput, Tuple]: r""" The [`AudioLDM2UNet2DConditionModel`] forward method. Args: sample (`torch.Tensor`): The noisy input tensor with the following shape `(batch, channel, height, width)`. timestep (`torch.Tensor` or `float` or `int`): The number of timesteps to denoise an input. encoder_hidden_states (`torch.Tensor`): The encoder hidden states with shape `(batch, sequence_length, feature_dim)`. encoder_attention_mask (`torch.Tensor`): A cross-attention mask of shape `(batch, sequence_length)` is applied to `encoder_hidden_states`. If `True` the mask is kept, otherwise if `False` it is discarded. Mask will be converted into a bias, which adds large negative values to the attention scores corresponding to "discard" tokens. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~models.unets.unet_2d_condition.UNet2DConditionOutput`] instead of a plain tuple. cross_attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the [`AttnProcessor`]. encoder_hidden_states_1 (`torch.Tensor`, *optional*): A second set of encoder hidden states with shape `(batch, sequence_length_2, feature_dim_2)`. Can be used to condition the model on a different set of embeddings to `encoder_hidden_states`. encoder_attention_mask_1 (`torch.Tensor`, *optional*): A cross-attention mask of shape `(batch, sequence_length_2)` is applied to `encoder_hidden_states_1`. If `True` the mask is kept, otherwise if `False` it is discarded. Mask will be converted into a bias, which adds large negative values to the attention scores corresponding to "discard" tokens. Returns: [`~models.unets.unet_2d_condition.UNet2DConditionOutput`] or `tuple`: If `return_dict` is True, an [`~models.unets.unet_2d_condition.UNet2DConditionOutput`] is returned, otherwise a `tuple` is returned where the first element is the sample tensor. """ # By default samples have to be AT least a multiple of the overall upsampling factor. # The overall upsampling factor is equal to 2 ** (# num of upsampling layers). # However, the upsampling interpolation output size can be forced to fit any upsampling size # on the fly if necessary. 
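        # Illustrative note: with the default four up blocks, every block except the final one
        # adds an upsampler, so `self.num_upsamplers` is 3 and the overall factor is 2**3 = 8;
        # latents whose height or width is not a multiple of 8 then need an explicit
        # `upsample_size` forwarded to the up blocks below.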
default_overall_up_factor = 2**self.num_upsamplers # upsample size should be forwarded when sample is not a multiple of `default_overall_up_factor` forward_upsample_size = False upsample_size = None if any(s % default_overall_up_factor != 0 for s in sample.shape[-2:]): logger.info("Forward upsample size to force interpolation output size.") forward_upsample_size = True # ensure attention_mask is a bias, and give it a singleton query_tokens dimension # expects mask of shape: # [batch, key_tokens] # adds singleton query_tokens dimension: # [batch, 1, key_tokens] # this helps to broadcast it as a bias over attention scores, which will be in one of the following shapes: # [batch, heads, query_tokens, key_tokens] (e.g. torch sdp attn) # [batch * heads, query_tokens, key_tokens] (e.g. xformers or classic attn) if attention_mask is not None: # assume that mask is expressed as: # (1 = keep, 0 = discard) # convert mask into a bias that can be added to attention scores: # (keep = +0, discard = -10000.0) attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0 attention_mask = attention_mask.unsqueeze(1) # convert encoder_attention_mask to a bias the same way we do for attention_mask if encoder_attention_mask is not None: encoder_attention_mask = (1 - encoder_attention_mask.to(sample.dtype)) * -10000.0 encoder_attention_mask = encoder_attention_mask.unsqueeze(1) if encoder_attention_mask_1 is not None: encoder_attention_mask_1 = (1 - encoder_attention_mask_1.to(sample.dtype)) * -10000.0 encoder_attention_mask_1 = encoder_attention_mask_1.unsqueeze(1) # 1. time timesteps = timestep if not torch.is_tensor(timesteps): # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can # This would be a good case for the `match` statement (Python 3.10+) is_mps = sample.device.type == "mps" is_npu = sample.device.type == "npu" if isinstance(timestep, float): dtype = torch.float32 if (is_mps or is_npu) else torch.float64 else: dtype = torch.int32 if (is_mps or is_npu) else torch.int64 timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device) elif len(timesteps.shape) == 0: timesteps = timesteps[None].to(sample.device) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML timesteps = timesteps.expand(sample.shape[0]) t_emb = self.time_proj(timesteps) # `Timesteps` does not contain any weights and will always return f32 tensors # but time_embedding might actually be running in fp16. so we need to cast here. # there might be better ways to encapsulate this. t_emb = t_emb.to(dtype=sample.dtype) emb = self.time_embedding(t_emb, timestep_cond) aug_emb = None if self.class_embedding is not None: if class_labels is None: raise ValueError("class_labels should be provided when num_class_embeds > 0") if self.config.class_embed_type == "timestep": class_labels = self.time_proj(class_labels) # `Timesteps` does not contain any weights and will always return f32 tensors # there might be better ways to encapsulate this. class_labels = class_labels.to(dtype=sample.dtype) class_emb = self.class_embedding(class_labels).to(dtype=sample.dtype) if self.config.class_embeddings_concat: emb = torch.cat([emb, class_emb], dim=-1) else: emb = emb + class_emb emb = emb + aug_emb if aug_emb is not None else emb if self.time_embed_act is not None: emb = self.time_embed_act(emb) # 2. pre-process sample = self.conv_in(sample) # 3. 
down down_block_res_samples = (sample,) for downsample_block in self.down_blocks: if hasattr(downsample_block, "has_cross_attention") and downsample_block.has_cross_attention: sample, res_samples = downsample_block( hidden_states=sample, temb=emb, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask, cross_attention_kwargs=cross_attention_kwargs, encoder_attention_mask=encoder_attention_mask, encoder_hidden_states_1=encoder_hidden_states_1, encoder_attention_mask_1=encoder_attention_mask_1, ) else: sample, res_samples = downsample_block(hidden_states=sample, temb=emb) down_block_res_samples += res_samples # 4. mid if self.mid_block is not None: sample = self.mid_block( sample, emb, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask, cross_attention_kwargs=cross_attention_kwargs, encoder_attention_mask=encoder_attention_mask, encoder_hidden_states_1=encoder_hidden_states_1, encoder_attention_mask_1=encoder_attention_mask_1, ) # 5. up for i, upsample_block in enumerate(self.up_blocks): is_final_block = i == len(self.up_blocks) - 1 res_samples = down_block_res_samples[-len(upsample_block.resnets) :] down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)] # if we have not reached the final block and need to forward the # upsample size, we do it here if not is_final_block and forward_upsample_size: upsample_size = down_block_res_samples[-1].shape[2:] if hasattr(upsample_block, "has_cross_attention") and upsample_block.has_cross_attention: sample = upsample_block( hidden_states=sample, temb=emb, res_hidden_states_tuple=res_samples, encoder_hidden_states=encoder_hidden_states, cross_attention_kwargs=cross_attention_kwargs, upsample_size=upsample_size, attention_mask=attention_mask, encoder_attention_mask=encoder_attention_mask, encoder_hidden_states_1=encoder_hidden_states_1, encoder_attention_mask_1=encoder_attention_mask_1, ) else: sample = upsample_block( hidden_states=sample, temb=emb, res_hidden_states_tuple=res_samples, upsample_size=upsample_size ) # 6. 
post-process if self.conv_norm_out: sample = self.conv_norm_out(sample) sample = self.conv_act(sample) sample = self.conv_out(sample) if not return_dict: return (sample,) return UNet2DConditionOutput(sample=sample) def get_down_block( down_block_type, num_layers, in_channels, out_channels, temb_channels, add_downsample, resnet_eps, resnet_act_fn, transformer_layers_per_block=1, num_attention_heads=None, resnet_groups=None, cross_attention_dim=None, downsample_padding=None, use_linear_projection=False, only_cross_attention=False, upcast_attention=False, resnet_time_scale_shift="default", ): down_block_type = down_block_type[7:] if down_block_type.startswith("UNetRes") else down_block_type if down_block_type == "DownBlock2D": return DownBlock2D( num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, add_downsample=add_downsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_groups=resnet_groups, downsample_padding=downsample_padding, resnet_time_scale_shift=resnet_time_scale_shift, ) elif down_block_type == "CrossAttnDownBlock2D": if cross_attention_dim is None: raise ValueError("cross_attention_dim must be specified for CrossAttnDownBlock2D") return CrossAttnDownBlock2D( num_layers=num_layers, transformer_layers_per_block=transformer_layers_per_block, in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, add_downsample=add_downsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_groups=resnet_groups, downsample_padding=downsample_padding, cross_attention_dim=cross_attention_dim, num_attention_heads=num_attention_heads, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention, upcast_attention=upcast_attention, resnet_time_scale_shift=resnet_time_scale_shift, ) raise ValueError(f"{down_block_type} does not exist.") def get_up_block( up_block_type, num_layers, in_channels, out_channels, prev_output_channel, temb_channels, add_upsample, resnet_eps, resnet_act_fn, transformer_layers_per_block=1, num_attention_heads=None, resnet_groups=None, cross_attention_dim=None, use_linear_projection=False, only_cross_attention=False, upcast_attention=False, resnet_time_scale_shift="default", ): up_block_type = up_block_type[7:] if up_block_type.startswith("UNetRes") else up_block_type if up_block_type == "UpBlock2D": return UpBlock2D( num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, prev_output_channel=prev_output_channel, temb_channels=temb_channels, add_upsample=add_upsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_groups=resnet_groups, resnet_time_scale_shift=resnet_time_scale_shift, ) elif up_block_type == "CrossAttnUpBlock2D": if cross_attention_dim is None: raise ValueError("cross_attention_dim must be specified for CrossAttnUpBlock2D") return CrossAttnUpBlock2D( num_layers=num_layers, transformer_layers_per_block=transformer_layers_per_block, in_channels=in_channels, out_channels=out_channels, prev_output_channel=prev_output_channel, temb_channels=temb_channels, add_upsample=add_upsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_groups=resnet_groups, cross_attention_dim=cross_attention_dim, num_attention_heads=num_attention_heads, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention, upcast_attention=upcast_attention, resnet_time_scale_shift=resnet_time_scale_shift, ) raise ValueError(f"{up_block_type} does not exist.") class CrossAttnDownBlock2D(nn.Module): def __init__( 
self, in_channels: int, out_channels: int, temb_channels: int, dropout: float = 0.0, num_layers: int = 1, transformer_layers_per_block: int = 1, resnet_eps: float = 1e-6, resnet_time_scale_shift: str = "default", resnet_act_fn: str = "swish", resnet_groups: int = 32, resnet_pre_norm: bool = True, num_attention_heads=1, cross_attention_dim=1280, output_scale_factor=1.0, downsample_padding=1, add_downsample=True, use_linear_projection=False, only_cross_attention=False, upcast_attention=False, ): super().__init__() resnets = [] attentions = [] self.has_cross_attention = True self.num_attention_heads = num_attention_heads if isinstance(cross_attention_dim, int): cross_attention_dim = (cross_attention_dim,) if isinstance(cross_attention_dim, (list, tuple)) and len(cross_attention_dim) > 4: raise ValueError( "Only up to 4 cross-attention layers are supported. Ensure that the length of cross-attention " f"dims is less than or equal to 4. Got cross-attention dims {cross_attention_dim} of length {len(cross_attention_dim)}" ) self.cross_attention_dim = cross_attention_dim for i in range(num_layers): in_channels = in_channels if i == 0 else out_channels resnets.append( ResnetBlock2D( in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, ) ) for j in range(len(cross_attention_dim)): attentions.append( Transformer2DModel( num_attention_heads, out_channels // num_attention_heads, in_channels=out_channels, num_layers=transformer_layers_per_block, cross_attention_dim=cross_attention_dim[j], norm_num_groups=resnet_groups, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention, upcast_attention=upcast_attention, double_self_attention=True if cross_attention_dim[j] is None else False, ) ) self.attentions = nn.ModuleList(attentions) self.resnets = nn.ModuleList(resnets) if add_downsample: self.downsamplers = nn.ModuleList( [ Downsample2D( out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name="op" ) ] ) else: self.downsamplers = None self.gradient_checkpointing = False def forward( self, hidden_states: torch.Tensor, temb: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, cross_attention_kwargs: Optional[Dict[str, Any]] = None, encoder_attention_mask: Optional[torch.Tensor] = None, encoder_hidden_states_1: Optional[torch.Tensor] = None, encoder_attention_mask_1: Optional[torch.Tensor] = None, ): output_states = () num_layers = len(self.resnets) num_attention_per_layer = len(self.attentions) // num_layers encoder_hidden_states_1 = ( encoder_hidden_states_1 if encoder_hidden_states_1 is not None else encoder_hidden_states ) encoder_attention_mask_1 = ( encoder_attention_mask_1 if encoder_hidden_states_1 is not None else encoder_attention_mask ) for i in range(num_layers): if torch.is_grad_enabled() and self.gradient_checkpointing: hidden_states = self._gradient_checkpointing_func(self.resnets[i], hidden_states, temb) for idx, cross_attention_dim in enumerate(self.cross_attention_dim): if cross_attention_dim is not None and idx <= 1: forward_encoder_hidden_states = encoder_hidden_states forward_encoder_attention_mask = encoder_attention_mask elif cross_attention_dim is not None and idx > 1: forward_encoder_hidden_states = encoder_hidden_states_1 
forward_encoder_attention_mask = encoder_attention_mask_1 else: forward_encoder_hidden_states = None forward_encoder_attention_mask = None hidden_states = self._gradient_checkpointing_func( self.attentions[i * num_attention_per_layer + idx], hidden_states, forward_encoder_hidden_states, None, # timestep None, # class_labels cross_attention_kwargs, attention_mask, forward_encoder_attention_mask, )[0] else: hidden_states = self.resnets[i](hidden_states, temb) for idx, cross_attention_dim in enumerate(self.cross_attention_dim): if cross_attention_dim is not None and idx <= 1: forward_encoder_hidden_states = encoder_hidden_states forward_encoder_attention_mask = encoder_attention_mask elif cross_attention_dim is not None and idx > 1: forward_encoder_hidden_states = encoder_hidden_states_1 forward_encoder_attention_mask = encoder_attention_mask_1 else: forward_encoder_hidden_states = None forward_encoder_attention_mask = None hidden_states = self.attentions[i * num_attention_per_layer + idx]( hidden_states, attention_mask=attention_mask, encoder_hidden_states=forward_encoder_hidden_states, encoder_attention_mask=forward_encoder_attention_mask, return_dict=False, )[0] output_states = output_states + (hidden_states,) if self.downsamplers is not None: for downsampler in self.downsamplers: hidden_states = downsampler(hidden_states) output_states = output_states + (hidden_states,) return hidden_states, output_states class UNetMidBlock2DCrossAttn(nn.Module): def __init__( self, in_channels: int, temb_channels: int, dropout: float = 0.0, num_layers: int = 1, transformer_layers_per_block: int = 1, resnet_eps: float = 1e-6, resnet_time_scale_shift: str = "default", resnet_act_fn: str = "swish", resnet_groups: int = 32, resnet_pre_norm: bool = True, num_attention_heads=1, output_scale_factor=1.0, cross_attention_dim=1280, use_linear_projection=False, upcast_attention=False, ): super().__init__() self.has_cross_attention = True self.num_attention_heads = num_attention_heads resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32) if isinstance(cross_attention_dim, int): cross_attention_dim = (cross_attention_dim,) if isinstance(cross_attention_dim, (list, tuple)) and len(cross_attention_dim) > 4: raise ValueError( "Only up to 4 cross-attention layers are supported. Ensure that the length of cross-attention " f"dims is less than or equal to 4. 
Got cross-attention dims {cross_attention_dim} of length {len(cross_attention_dim)}" ) self.cross_attention_dim = cross_attention_dim # there is always at least one resnet resnets = [ ResnetBlock2D( in_channels=in_channels, out_channels=in_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, ) ] attentions = [] for i in range(num_layers): for j in range(len(cross_attention_dim)): attentions.append( Transformer2DModel( num_attention_heads, in_channels // num_attention_heads, in_channels=in_channels, num_layers=transformer_layers_per_block, cross_attention_dim=cross_attention_dim[j], norm_num_groups=resnet_groups, use_linear_projection=use_linear_projection, upcast_attention=upcast_attention, double_self_attention=True if cross_attention_dim[j] is None else False, ) ) resnets.append( ResnetBlock2D( in_channels=in_channels, out_channels=in_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, ) ) self.attentions = nn.ModuleList(attentions) self.resnets = nn.ModuleList(resnets) self.gradient_checkpointing = False def forward( self, hidden_states: torch.Tensor, temb: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, cross_attention_kwargs: Optional[Dict[str, Any]] = None, encoder_attention_mask: Optional[torch.Tensor] = None, encoder_hidden_states_1: Optional[torch.Tensor] = None, encoder_attention_mask_1: Optional[torch.Tensor] = None, ) -> torch.Tensor: hidden_states = self.resnets[0](hidden_states, temb) num_attention_per_layer = len(self.attentions) // (len(self.resnets) - 1) encoder_hidden_states_1 = ( encoder_hidden_states_1 if encoder_hidden_states_1 is not None else encoder_hidden_states ) encoder_attention_mask_1 = ( encoder_attention_mask_1 if encoder_hidden_states_1 is not None else encoder_attention_mask ) for i in range(len(self.resnets[1:])): if torch.is_grad_enabled() and self.gradient_checkpointing: for idx, cross_attention_dim in enumerate(self.cross_attention_dim): if cross_attention_dim is not None and idx <= 1: forward_encoder_hidden_states = encoder_hidden_states forward_encoder_attention_mask = encoder_attention_mask elif cross_attention_dim is not None and idx > 1: forward_encoder_hidden_states = encoder_hidden_states_1 forward_encoder_attention_mask = encoder_attention_mask_1 else: forward_encoder_hidden_states = None forward_encoder_attention_mask = None hidden_states = self._gradient_checkpointing_func( self.attentions[i * num_attention_per_layer + idx], hidden_states, forward_encoder_hidden_states, None, # timestep None, # class_labels cross_attention_kwargs, attention_mask, forward_encoder_attention_mask, )[0] hidden_states = self._gradient_checkpointing_func(self.resnets[i + 1], hidden_states, temb) else: for idx, cross_attention_dim in enumerate(self.cross_attention_dim): if cross_attention_dim is not None and idx <= 1: forward_encoder_hidden_states = encoder_hidden_states forward_encoder_attention_mask = encoder_attention_mask elif cross_attention_dim is not None and idx > 1: forward_encoder_hidden_states = encoder_hidden_states_1 forward_encoder_attention_mask = encoder_attention_mask_1 else: forward_encoder_hidden_states = 
None forward_encoder_attention_mask = None hidden_states = self.attentions[i * num_attention_per_layer + idx]( hidden_states, attention_mask=attention_mask, encoder_hidden_states=forward_encoder_hidden_states, encoder_attention_mask=forward_encoder_attention_mask, return_dict=False, )[0] hidden_states = self.resnets[i + 1](hidden_states, temb) return hidden_states class CrossAttnUpBlock2D(nn.Module): def __init__( self, in_channels: int, out_channels: int, prev_output_channel: int, temb_channels: int, dropout: float = 0.0, num_layers: int = 1, transformer_layers_per_block: int = 1, resnet_eps: float = 1e-6, resnet_time_scale_shift: str = "default", resnet_act_fn: str = "swish", resnet_groups: int = 32, resnet_pre_norm: bool = True, num_attention_heads=1, cross_attention_dim=1280, output_scale_factor=1.0, add_upsample=True, use_linear_projection=False, only_cross_attention=False, upcast_attention=False, ): super().__init__() resnets = [] attentions = [] self.has_cross_attention = True self.num_attention_heads = num_attention_heads if isinstance(cross_attention_dim, int): cross_attention_dim = (cross_attention_dim,) if isinstance(cross_attention_dim, (list, tuple)) and len(cross_attention_dim) > 4: raise ValueError( "Only up to 4 cross-attention layers are supported. Ensure that the length of cross-attention " f"dims is less than or equal to 4. Got cross-attention dims {cross_attention_dim} of length {len(cross_attention_dim)}" ) self.cross_attention_dim = cross_attention_dim for i in range(num_layers): res_skip_channels = in_channels if (i == num_layers - 1) else out_channels resnet_in_channels = prev_output_channel if i == 0 else out_channels resnets.append( ResnetBlock2D( in_channels=resnet_in_channels + res_skip_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, ) ) for j in range(len(cross_attention_dim)): attentions.append( Transformer2DModel( num_attention_heads, out_channels // num_attention_heads, in_channels=out_channels, num_layers=transformer_layers_per_block, cross_attention_dim=cross_attention_dim[j], norm_num_groups=resnet_groups, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention, upcast_attention=upcast_attention, double_self_attention=True if cross_attention_dim[j] is None else False, ) ) self.attentions = nn.ModuleList(attentions) self.resnets = nn.ModuleList(resnets) if add_upsample: self.upsamplers = nn.ModuleList([Upsample2D(out_channels, use_conv=True, out_channels=out_channels)]) else: self.upsamplers = None self.gradient_checkpointing = False def forward( self, hidden_states: torch.Tensor, res_hidden_states_tuple: Tuple[torch.Tensor, ...], temb: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, cross_attention_kwargs: Optional[Dict[str, Any]] = None, upsample_size: Optional[int] = None, attention_mask: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, encoder_hidden_states_1: Optional[torch.Tensor] = None, encoder_attention_mask_1: Optional[torch.Tensor] = None, ): num_layers = len(self.resnets) num_attention_per_layer = len(self.attentions) // num_layers encoder_hidden_states_1 = ( encoder_hidden_states_1 if encoder_hidden_states_1 is not None else encoder_hidden_states ) encoder_attention_mask_1 = ( encoder_attention_mask_1 if encoder_hidden_states_1 is 
not None else encoder_attention_mask ) for i in range(num_layers): # pop res hidden states res_hidden_states = res_hidden_states_tuple[-1] res_hidden_states_tuple = res_hidden_states_tuple[:-1] hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1) if torch.is_grad_enabled() and self.gradient_checkpointing: hidden_states = self._gradient_checkpointing_func(self.resnets[i], hidden_states, temb) for idx, cross_attention_dim in enumerate(self.cross_attention_dim): if cross_attention_dim is not None and idx <= 1: forward_encoder_hidden_states = encoder_hidden_states forward_encoder_attention_mask = encoder_attention_mask elif cross_attention_dim is not None and idx > 1: forward_encoder_hidden_states = encoder_hidden_states_1 forward_encoder_attention_mask = encoder_attention_mask_1 else: forward_encoder_hidden_states = None forward_encoder_attention_mask = None hidden_states = self._gradient_checkpointing_func( self.attentions[i * num_attention_per_layer + idx], hidden_states, forward_encoder_hidden_states, None, # timestep None, # class_labels cross_attention_kwargs, attention_mask, forward_encoder_attention_mask, )[0] else: hidden_states = self.resnets[i](hidden_states, temb) for idx, cross_attention_dim in enumerate(self.cross_attention_dim): if cross_attention_dim is not None and idx <= 1: forward_encoder_hidden_states = encoder_hidden_states forward_encoder_attention_mask = encoder_attention_mask elif cross_attention_dim is not None and idx > 1: forward_encoder_hidden_states = encoder_hidden_states_1 forward_encoder_attention_mask = encoder_attention_mask_1 else: forward_encoder_hidden_states = None forward_encoder_attention_mask = None hidden_states = self.attentions[i * num_attention_per_layer + idx]( hidden_states, attention_mask=attention_mask, encoder_hidden_states=forward_encoder_hidden_states, encoder_attention_mask=forward_encoder_attention_mask, return_dict=False, )[0] if self.upsamplers is not None: for upsampler in self.upsamplers: hidden_states = upsampler(hidden_states, upsample_size) return hidden_states
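

# Illustrative usage sketch of `AudioLDM2ProjectionModel` above, kept entirely in comments.
# The tensor sizes are made-up example values, not taken from any released AudioLDM2 checkpoint:
#
#     proj = AudioLDM2ProjectionModel(
#         text_encoder_dim=512, text_encoder_1_dim=1024, langauge_model_dim=768
#     )
#     clap_hidden = torch.ones(2, 1, 512)  # pooled CLAP text embedding (sequence length 1)
#     t5_hidden = torch.ones(2, 7, 1024)  # T5 token embeddings
#     clap_mask = torch.ones(2, 1, dtype=torch.long)
#     t5_mask = torch.ones(2, 7, dtype=torch.long)
#     out = proj(
#         hidden_states=clap_hidden,
#         hidden_states_1=t5_hidden,
#         attention_mask=clap_mask,
#         attention_mask_1=t5_mask,
#     )
#     # Each stream is projected to 768 dims and wrapped in learned SOS/EOS embeddings before the
#     # two streams are concatenated, so `out.hidden_states` has shape (2, (1 + 2) + (7 + 2), 768)
#     # and `out.attention_mask` has shape (2, 12).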
diffusers/src/diffusers/pipelines/audioldm2/modeling_audioldm2.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/audioldm2/modeling_audioldm2.py", "repo_id": "diffusers", "token_count": 32815 }
169
# Copyright 2025 ConsisID Authors and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect import math from typing import Any, Callable, Dict, List, Optional, Tuple, Union import numpy as np import PIL import torch from transformers import T5EncoderModel, T5Tokenizer from ...callbacks import MultiPipelineCallbacks, PipelineCallback from ...image_processor import PipelineImageInput from ...loaders import CogVideoXLoraLoaderMixin from ...models import AutoencoderKLCogVideoX, ConsisIDTransformer3DModel from ...models.embeddings import get_3d_rotary_pos_embed from ...pipelines.pipeline_utils import DiffusionPipeline from ...schedulers import CogVideoXDPMScheduler from ...utils import is_opencv_available, logging, replace_example_docstring from ...utils.torch_utils import randn_tensor from ...video_processor import VideoProcessor from .pipeline_output import ConsisIDPipelineOutput if is_opencv_available(): import cv2 logger = logging.get_logger(__name__) # pylint: disable=invalid-name EXAMPLE_DOC_STRING = """ Examples: ```python >>> import torch >>> from diffusers import ConsisIDPipeline >>> from diffusers.pipelines.consisid.consisid_utils import prepare_face_models, process_face_embeddings_infer >>> from diffusers.utils import export_to_video >>> from huggingface_hub import snapshot_download >>> snapshot_download(repo_id="BestWishYsh/ConsisID-preview", local_dir="BestWishYsh/ConsisID-preview") >>> ( ... face_helper_1, ... face_helper_2, ... face_clip_model, ... face_main_model, ... eva_transform_mean, ... eva_transform_std, ... ) = prepare_face_models("BestWishYsh/ConsisID-preview", device="cuda", dtype=torch.bfloat16) >>> pipe = ConsisIDPipeline.from_pretrained("BestWishYsh/ConsisID-preview", torch_dtype=torch.bfloat16) >>> pipe.to("cuda") >>> # ConsisID works well with long and well-described prompts. Make sure the face in the image is clearly visible (e.g., preferably half-body or full-body). >>> prompt = "The video captures a boy walking along a city street, filmed in black and white on a classic 35mm camera. His expression is thoughtful, his brow slightly furrowed as if he's lost in contemplation. The film grain adds a textured, timeless quality to the image, evoking a sense of nostalgia. Around him, the cityscape is filled with vintage buildings, cobblestone sidewalks, and softly blurred figures passing by, their outlines faint and indistinct. Streetlights cast a gentle glow, while shadows play across the boy's path, adding depth to the scene. The lighting highlights the boy's subtle smile, hinting at a fleeting moment of curiosity. The overall cinematic atmosphere, complete with classic film still aesthetics and dramatic contrasts, gives the scene an evocative and introspective feel." >>> image = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/consisid/consisid_input.png?download=true" >>> id_cond, id_vit_hidden, image, face_kps = process_face_embeddings_infer( ... face_helper_1, ... face_clip_model, ... 
face_helper_2, ... eva_transform_mean, ... eva_transform_std, ... face_main_model, ... "cuda", ... torch.bfloat16, ... image, ... is_align_face=True, ... ) >>> video = pipe( ... image=image, ... prompt=prompt, ... num_inference_steps=50, ... guidance_scale=6.0, ... use_dynamic_cfg=False, ... id_vit_hidden=id_vit_hidden, ... id_cond=id_cond, ... kps_cond=face_kps, ... generator=torch.Generator("cuda").manual_seed(42), ... ) >>> export_to_video(video.frames[0], "output.mp4", fps=8) ``` """ def draw_kps(image_pil, kps, color_list=[(255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 255, 0), (255, 0, 255)]): """ This function draws keypoints and the limbs connecting them on an image. Parameters: - image_pil (PIL.Image): Input image as a PIL object. - kps (list of tuples): A list of keypoints where each keypoint is a tuple of (x, y) coordinates. - color_list (list of tuples, optional): List of colors (in RGB format) for each keypoint. Default is a set of five colors. Returns: - PIL.Image: Image with the keypoints and limbs drawn. """ stickwidth = 4 limbSeq = np.array([[0, 2], [1, 2], [3, 2], [4, 2]]) kps = np.array(kps) w, h = image_pil.size out_img = np.zeros([h, w, 3]) for i in range(len(limbSeq)): index = limbSeq[i] color = color_list[index[0]] x = kps[index][:, 0] y = kps[index][:, 1] length = ((x[0] - x[1]) ** 2 + (y[0] - y[1]) ** 2) ** 0.5 angle = math.degrees(math.atan2(y[0] - y[1], x[0] - x[1])) polygon = cv2.ellipse2Poly( (int(np.mean(x)), int(np.mean(y))), (int(length / 2), stickwidth), int(angle), 0, 360, 1 ) out_img = cv2.fillConvexPoly(out_img.copy(), polygon, color) out_img = (out_img * 0.6).astype(np.uint8) for idx_kp, kp in enumerate(kps): color = color_list[idx_kp] x, y = kp out_img = cv2.circle(out_img.copy(), (int(x), int(y)), 10, color, -1) out_img_pil = PIL.Image.fromarray(out_img.astype(np.uint8)) return out_img_pil # Similar to diffusers.pipelines.hunyuandit.pipeline_hunyuandit.get_resize_crop_region_for_grid def get_resize_crop_region_for_grid(src, tgt_width, tgt_height): """ This function calculates the resize and crop region for an image to fit a target width and height while preserving the aspect ratio. Parameters: - src (tuple): A tuple containing the source image's height (h) and width (w). - tgt_width (int): The target width to resize the image. - tgt_height (int): The target height to resize the image. Returns: - tuple: Two tuples representing the crop region: 1. The top-left coordinates of the crop region. 2. The bottom-right coordinates of the crop region. """ tw = tgt_width th = tgt_height h, w = src r = h / w if r > (th / tw): resize_height = th resize_width = int(round(th / h * w)) else: resize_width = tw resize_height = int(round(tw / w * h)) crop_top = int(round((th - resize_height) / 2.0)) crop_left = int(round((tw - resize_width) / 2.0)) return (crop_top, crop_left), (crop_top + resize_height, crop_left + resize_width) # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps def retrieve_timesteps( scheduler, num_inference_steps: Optional[int] = None, device: Optional[Union[str, torch.device]] = None, timesteps: Optional[List[int]] = None, sigmas: Optional[List[float]] = None, **kwargs, ): r""" Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. Args: scheduler (`SchedulerMixin`): The scheduler to get timesteps from. 
num_inference_steps (`int`): The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` must be `None`. device (`str` or `torch.device`, *optional*): The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. timesteps (`List[int]`, *optional*): Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed, `num_inference_steps` and `sigmas` must be `None`. sigmas (`List[float]`, *optional*): Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed, `num_inference_steps` and `timesteps` must be `None`. Returns: `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the second element is the number of inference steps. """ if timesteps is not None and sigmas is not None: raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values") if timesteps is not None: accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accepts_timesteps: raise ValueError( f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" f" timestep schedules. Please check whether you are using the correct scheduler." ) scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) elif sigmas is not None: accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accept_sigmas: raise ValueError( f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" f" sigmas schedules. Please check whether you are using the correct scheduler." ) scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) else: scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) timesteps = scheduler.timesteps return timesteps, num_inference_steps # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents def retrieve_latents( encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample" ): if hasattr(encoder_output, "latent_dist") and sample_mode == "sample": return encoder_output.latent_dist.sample(generator) elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax": return encoder_output.latent_dist.mode() elif hasattr(encoder_output, "latents"): return encoder_output.latents else: raise AttributeError("Could not access latents of provided encoder_output") class ConsisIDPipeline(DiffusionPipeline, CogVideoXLoraLoaderMixin): r""" Pipeline for image-to-video generation using ConsisID. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) Args: vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) Model to encode and decode videos to and from latent representations. text_encoder ([`T5EncoderModel`]): Frozen text-encoder. ConsisID uses [T5](https://huggingface.co/docs/transformers/model_doc/t5#transformers.T5EncoderModel); specifically the [t5-v1_1-xxl](https://huggingface.co/PixArt-alpha/PixArt-alpha/tree/main/t5-v1_1-xxl) variant. 
tokenizer (`T5Tokenizer`): Tokenizer of class [T5Tokenizer](https://huggingface.co/docs/transformers/model_doc/t5#transformers.T5Tokenizer). transformer ([`ConsisIDTransformer3DModel`]): A text conditioned `ConsisIDTransformer3DModel` to denoise the encoded video latents. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `transformer` to denoise the encoded video latents. """ _optional_components = [] model_cpu_offload_seq = "text_encoder->transformer->vae" _callback_tensor_inputs = [ "latents", "prompt_embeds", "negative_prompt_embeds", ] def __init__( self, tokenizer: T5Tokenizer, text_encoder: T5EncoderModel, vae: AutoencoderKLCogVideoX, transformer: ConsisIDTransformer3DModel, scheduler: CogVideoXDPMScheduler, ): super().__init__() self.register_modules( tokenizer=tokenizer, text_encoder=text_encoder, vae=vae, transformer=transformer, scheduler=scheduler, ) self.vae_scale_factor_spatial = ( 2 ** (len(self.vae.config.block_out_channels) - 1) if hasattr(self, "vae") and self.vae is not None else 8 ) self.vae_scale_factor_temporal = ( self.vae.config.temporal_compression_ratio if hasattr(self, "vae") and self.vae is not None else 4 ) self.vae_scaling_factor_image = ( self.vae.config.scaling_factor if hasattr(self, "vae") and self.vae is not None else 0.7 ) self.video_processor = VideoProcessor(vae_scale_factor=self.vae_scale_factor_spatial) # Copied from diffusers.pipelines.cogvideo.pipeline_cogvideox.CogVideoXPipeline._get_t5_prompt_embeds def _get_t5_prompt_embeds( self, prompt: Union[str, List[str]] = None, num_videos_per_prompt: int = 1, max_sequence_length: int = 226, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None, ): device = device or self._execution_device dtype = dtype or self.text_encoder.dtype prompt = [prompt] if isinstance(prompt, str) else prompt batch_size = len(prompt) text_inputs = self.tokenizer( prompt, padding="max_length", max_length=max_sequence_length, truncation=True, add_special_tokens=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, max_sequence_length - 1 : -1]) logger.warning( "The following part of your input was truncated because `max_sequence_length` is set to " f" {max_sequence_length} tokens: {removed_text}" ) prompt_embeds = self.text_encoder(text_input_ids.to(device))[0] prompt_embeds = prompt_embeds.to(dtype=dtype, device=device) # duplicate text embeddings for each generation per prompt, using mps friendly method _, seq_len, _ = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_videos_per_prompt, 1) prompt_embeds = prompt_embeds.view(batch_size * num_videos_per_prompt, seq_len, -1) return prompt_embeds # Copied from diffusers.pipelines.cogvideo.pipeline_cogvideox.CogVideoXPipeline.encode_prompt def encode_prompt( self, prompt: Union[str, List[str]], negative_prompt: Optional[Union[str, List[str]]] = None, do_classifier_free_guidance: bool = True, num_videos_per_prompt: int = 1, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, max_sequence_length: int = 226, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None, ): r""" Encodes the prompt into text encoder hidden states. 
Args: prompt (`str` or `List[str]`, *optional*): prompt to be encoded negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). do_classifier_free_guidance (`bool`, *optional*, defaults to `True`): Whether to use classifier free guidance or not. num_videos_per_prompt (`int`, *optional*, defaults to 1): Number of videos that should be generated per prompt. torch device to place the resulting embeddings on prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. device: (`torch.device`, *optional*): torch device dtype: (`torch.dtype`, *optional*): torch dtype """ device = device or self._execution_device prompt = [prompt] if isinstance(prompt, str) else prompt if prompt is not None: batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: prompt_embeds = self._get_t5_prompt_embeds( prompt=prompt, num_videos_per_prompt=num_videos_per_prompt, max_sequence_length=max_sequence_length, device=device, dtype=dtype, ) if do_classifier_free_guidance and negative_prompt_embeds is None: negative_prompt = negative_prompt or "" negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt if prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." ) elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) negative_prompt_embeds = self._get_t5_prompt_embeds( prompt=negative_prompt, num_videos_per_prompt=num_videos_per_prompt, max_sequence_length=max_sequence_length, device=device, dtype=dtype, ) return prompt_embeds, negative_prompt_embeds def prepare_latents( self, image: torch.Tensor, batch_size: int = 1, num_channels_latents: int = 16, num_frames: int = 13, height: int = 60, width: int = 90, dtype: Optional[torch.dtype] = None, device: Optional[torch.device] = None, generator: Optional[torch.Generator] = None, latents: Optional[torch.Tensor] = None, kps_cond: Optional[torch.Tensor] = None, ): if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
) num_frames = (num_frames - 1) // self.vae_scale_factor_temporal + 1 shape = ( batch_size, num_frames, num_channels_latents, height // self.vae_scale_factor_spatial, width // self.vae_scale_factor_spatial, ) image = image.unsqueeze(2) # [B, C, F, H, W] if isinstance(generator, list): image_latents = [ retrieve_latents(self.vae.encode(image[i].unsqueeze(0)), generator[i]) for i in range(batch_size) ] if kps_cond is not None: kps_cond = kps_cond.unsqueeze(2) kps_cond_latents = [ retrieve_latents(self.vae.encode(kps_cond[i].unsqueeze(0)), generator[i]) for i in range(batch_size) ] else: image_latents = [retrieve_latents(self.vae.encode(img.unsqueeze(0)), generator) for img in image] if kps_cond is not None: kps_cond = kps_cond.unsqueeze(2) kps_cond_latents = [retrieve_latents(self.vae.encode(img.unsqueeze(0)), generator) for img in kps_cond] image_latents = torch.cat(image_latents, dim=0).to(dtype).permute(0, 2, 1, 3, 4) # [B, F, C, H, W] image_latents = self.vae_scaling_factor_image * image_latents if kps_cond is not None: kps_cond_latents = torch.cat(kps_cond_latents, dim=0).to(dtype).permute(0, 2, 1, 3, 4) # [B, F, C, H, W] kps_cond_latents = self.vae_scaling_factor_image * kps_cond_latents padding_shape = ( batch_size, num_frames - 2, num_channels_latents, height // self.vae_scale_factor_spatial, width // self.vae_scale_factor_spatial, ) else: padding_shape = ( batch_size, num_frames - 1, num_channels_latents, height // self.vae_scale_factor_spatial, width // self.vae_scale_factor_spatial, ) latent_padding = torch.zeros(padding_shape, device=device, dtype=dtype) if kps_cond is not None: image_latents = torch.cat([image_latents, kps_cond_latents, latent_padding], dim=1) else: image_latents = torch.cat([image_latents, latent_padding], dim=1) if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device) # scale the initial noise by the standard deviation required by the scheduler latents = latents * self.scheduler.init_noise_sigma return latents, image_latents # Copied from diffusers.pipelines.cogvideo.pipeline_cogvideox.CogVideoXPipeline.decode_latents def decode_latents(self, latents: torch.Tensor) -> torch.Tensor: latents = latents.permute(0, 2, 1, 3, 4) # [batch_size, num_channels, num_frames, height, width] latents = 1 / self.vae_scaling_factor_image * latents frames = self.vae.decode(latents).sample return frames # Copied from diffusers.pipelines.animatediff.pipeline_animatediff_video2video.AnimateDiffVideoToVideoPipeline.get_timesteps def get_timesteps(self, num_inference_steps, timesteps, strength, device): # get the original timestep using init_timestep init_timestep = min(int(num_inference_steps * strength), num_inference_steps) t_start = max(num_inference_steps - init_timestep, 0) timesteps = timesteps[t_start * self.scheduler.order :] return timesteps, num_inference_steps - t_start # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs def prepare_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://huggingface.co/papers/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs def check_inputs( self, image, prompt, height, width, negative_prompt, callback_on_step_end_tensor_inputs, latents=None, prompt_embeds=None, negative_prompt_embeds=None, ): if ( not isinstance(image, torch.Tensor) and not isinstance(image, PIL.Image.Image) and not isinstance(image, list) ): raise ValueError( "`image` has to be of type `torch.Tensor` or `PIL.Image.Image` or `List[PIL.Image.Image]` but is" f" {type(image)}" ) if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") if callback_on_step_end_tensor_inputs is not None and not all( k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs ): raise ValueError( f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" ) if prompt is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt is None and prompt_embeds is None: raise ValueError( "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." ) elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if prompt is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt`: {prompt} and `negative_prompt_embeds`:" f" {negative_prompt_embeds}. Please make sure to only forward one of the two." ) if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" f" {negative_prompt_embeds}. Please make sure to only forward one of the two." ) if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError( "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" f" {negative_prompt_embeds.shape}." 
) def _prepare_rotary_positional_embeddings( self, height: int, width: int, num_frames: int, device: torch.device, ) -> Tuple[torch.Tensor, torch.Tensor]: grid_height = height // (self.vae_scale_factor_spatial * self.transformer.config.patch_size) grid_width = width // (self.vae_scale_factor_spatial * self.transformer.config.patch_size) base_size_width = self.transformer.config.sample_width // self.transformer.config.patch_size base_size_height = self.transformer.config.sample_height // self.transformer.config.patch_size grid_crops_coords = get_resize_crop_region_for_grid( (grid_height, grid_width), base_size_width, base_size_height ) freqs_cos, freqs_sin = get_3d_rotary_pos_embed( embed_dim=self.transformer.config.attention_head_dim, crops_coords=grid_crops_coords, grid_size=(grid_height, grid_width), temporal_size=num_frames, device=device, ) return freqs_cos, freqs_sin @property def guidance_scale(self): return self._guidance_scale @property def num_timesteps(self): return self._num_timesteps @property def attention_kwargs(self): return self._attention_kwargs @property def interrupt(self): return self._interrupt @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__( self, image: PipelineImageInput, prompt: Optional[Union[str, List[str]]] = None, negative_prompt: Optional[Union[str, List[str]]] = None, height: int = 480, width: int = 720, num_frames: int = 49, num_inference_steps: int = 50, guidance_scale: float = 6.0, use_dynamic_cfg: bool = False, num_videos_per_prompt: int = 1, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, prompt_embeds: Optional[torch.FloatTensor] = None, negative_prompt_embeds: Optional[torch.FloatTensor] = None, output_type: str = "pil", return_dict: bool = True, attention_kwargs: Optional[Dict[str, Any]] = None, callback_on_step_end: Optional[ Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks] ] = None, callback_on_step_end_tensor_inputs: List[str] = ["latents"], max_sequence_length: int = 226, id_vit_hidden: Optional[torch.Tensor] = None, id_cond: Optional[torch.Tensor] = None, kps_cond: Optional[torch.Tensor] = None, ) -> Union[ConsisIDPipelineOutput, Tuple]: """ Function invoked when calling the pipeline for generation. Args: image (`PipelineImageInput`): The input image to condition the generation on. Must be an image, a list of images or a `torch.Tensor`. prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. instead. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). height (`int`, *optional*, defaults to self.transformer.config.sample_height * self.vae_scale_factor_spatial): The height in pixels of the generated image. This is set to 480 by default for the best results. width (`int`, *optional*, defaults to self.transformer.config.sample_height * self.vae_scale_factor_spatial): The width in pixels of the generated image. This is set to 720 by default for the best results. num_frames (`int`, defaults to `49`): Number of frames to generate. Must be divisible by self.vae_scale_factor_temporal. 
                Generated video will contain 1 extra frame because ConsisID is conditioned with (num_seconds * fps + 1)
                frames where num_seconds is 6 and fps is 4. However, since videos can be saved at any fps, the only
                condition that needs to be satisfied is that of divisibility mentioned above.
            num_inference_steps (`int`, *optional*, defaults to 50):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            guidance_scale (`float`, *optional*, defaults to 6):
                Guidance scale as defined in [Classifier-Free Diffusion
                Guidance](https://huggingface.co/papers/2207.12598). `guidance_scale` is defined as `w` of equation 2
                of [Imagen Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting
                `guidance_scale > 1`. A higher guidance scale encourages the model to generate images that are closely
                linked to the text `prompt`, usually at the expense of lower image quality.
            use_dynamic_cfg (`bool`, *optional*, defaults to `False`):
                If True, dynamically adjusts the guidance scale during inference. This allows the model to use a
                progressive guidance scale, improving the balance between text-guided generation and image quality over
                the course of the inference steps. Typically, early inference steps use a higher guidance scale for
                more faithful image generation, while later steps reduce it for more diverse and natural results.
            num_videos_per_prompt (`int`, *optional*, defaults to 1):
                The number of videos to generate per prompt.
            generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
                One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
                to make generation deterministic.
            latents (`torch.FloatTensor`, *optional*):
                Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
                tensor will be generated by sampling using the supplied random `generator`.
            prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                provided, text embeddings will be generated from `prompt` input argument.
            negative_prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
                argument.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between
                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.consisid.pipeline_output.ConsisIDPipelineOutput`] instead of a
                plain tuple.
            attention_kwargs (`dict`, *optional*):
                A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
                `self.processor` in
                [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
            callback_on_step_end (`Callable`, *optional*):
                A function that is called at the end of each denoising step during inference. The function is called
                with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
                callback_kwargs: Dict)`.
`callback_kwargs` will include a list of all tensors as specified by `callback_on_step_end_tensor_inputs`. callback_on_step_end_tensor_inputs (`List`, *optional*): The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the `._callback_tensor_inputs` attribute of your pipeline class. max_sequence_length (`int`, defaults to `226`): Maximum sequence length in encoded prompt. Must be consistent with `self.transformer.config.max_text_seq_length` otherwise may lead to poor results. id_vit_hidden (`Optional[torch.Tensor]`, *optional*): The tensor representing the hidden features extracted from the face model, which are used to condition the local facial extractor. This is crucial for the model to obtain high-frequency information of the face. If not provided, the local facial extractor will not run normally. id_cond (`Optional[torch.Tensor]`, *optional*): The tensor representing the hidden features extracted from the clip model, which are used to condition the local facial extractor. This is crucial for the model to edit facial features If not provided, the local facial extractor will not run normally. kps_cond (`Optional[torch.Tensor]`, *optional*): A tensor that determines whether the global facial extractor use keypoint information for conditioning. If provided, this tensor controls whether facial keypoints such as eyes, nose, and mouth landmarks are used during the generation process. This helps ensure the model retains more facial low-frequency information. Examples: Returns: [`~pipelines.consisid.pipeline_output.ConsisIDPipelineOutput`] or `tuple`: [`~pipelines.consisid.pipeline_output.ConsisIDPipelineOutput`] if `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is a list with the generated images. """ if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)): callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs height = height or self.transformer.config.sample_height * self.vae_scale_factor_spatial width = width or self.transformer.config.sample_width * self.vae_scale_factor_spatial num_frames = num_frames or self.transformer.config.sample_frames num_videos_per_prompt = 1 # 1. Check inputs. Raise error if not correct self.check_inputs( image=image, prompt=prompt, height=height, width=width, negative_prompt=negative_prompt, callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs, latents=latents, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, ) self._guidance_scale = guidance_scale self._attention_kwargs = attention_kwargs self._interrupt = False # 2. Default call parameters if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://huggingface.co/papers/2205.11487 . `guidance_scale = 1` # corresponds to doing no classifier free guidance. do_classifier_free_guidance = guidance_scale > 1.0 # 3. 
Encode input prompt prompt_embeds, negative_prompt_embeds = self.encode_prompt( prompt=prompt, negative_prompt=negative_prompt, do_classifier_free_guidance=do_classifier_free_guidance, num_videos_per_prompt=num_videos_per_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, max_sequence_length=max_sequence_length, device=device, ) if do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) # 4. Prepare timesteps timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device) self._num_timesteps = len(timesteps) # 5. Prepare latents is_kps = getattr(self.transformer.config, "is_kps", False) kps_cond = kps_cond if is_kps else None if kps_cond is not None: kps_cond = draw_kps(image, kps_cond) kps_cond = self.video_processor.preprocess(kps_cond, height=height, width=width).to( device, dtype=prompt_embeds.dtype ) image = self.video_processor.preprocess(image, height=height, width=width).to( device, dtype=prompt_embeds.dtype ) latent_channels = self.transformer.config.in_channels // 2 latents, image_latents = self.prepare_latents( image, batch_size * num_videos_per_prompt, latent_channels, num_frames, height, width, prompt_embeds.dtype, device, generator, latents, kps_cond, ) # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) # 7. Create rotary embeds if required image_rotary_emb = ( self._prepare_rotary_positional_embeddings(height, width, latents.size(1), device) if self.transformer.config.use_rotary_positional_embeddings else None ) # 8. Denoising loop num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) with self.progress_bar(total=num_inference_steps) as progress_bar: # for DPM-solver++ old_pred_original_sample = None timesteps_cpu = timesteps.cpu() for i, t in enumerate(timesteps): if self.interrupt: continue latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) latent_image_input = torch.cat([image_latents] * 2) if do_classifier_free_guidance else image_latents latent_model_input = torch.cat([latent_model_input, latent_image_input], dim=2) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML timestep = t.expand(latent_model_input.shape[0]) # predict noise model_output noise_pred = self.transformer( hidden_states=latent_model_input, encoder_hidden_states=prompt_embeds, timestep=timestep, image_rotary_emb=image_rotary_emb, attention_kwargs=attention_kwargs, return_dict=False, id_vit_hidden=id_vit_hidden, id_cond=id_cond, )[0] noise_pred = noise_pred.float() # perform guidance if use_dynamic_cfg: self._guidance_scale = 1 + guidance_scale * ( ( 1 - math.cos( math.pi * ((num_inference_steps - timesteps_cpu[i].item()) / num_inference_steps) ** 5.0 ) ) / 2 ) if do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 if not isinstance(self.scheduler, CogVideoXDPMScheduler): latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] else: latents, old_pred_original_sample = self.scheduler.step( noise_pred, old_pred_original_sample, t, timesteps[i - 1] if i > 0 else None, latents, **extra_step_kwargs, 
return_dict=False, ) latents = latents.to(prompt_embeds.dtype) # call the callback, if provided if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop("latents", latents) prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if not output_type == "latent": video = self.decode_latents(latents) video = self.video_processor.postprocess_video(video=video, output_type=output_type) else: video = latents # Offload all models self.maybe_free_model_hooks() if not return_dict: return (video,) return ConsisIDPipelineOutput(frames=video)
diffusers/src/diffusers/pipelines/consisid/pipeline_consisid.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/consisid/pipeline_consisid.py", "repo_id": "diffusers", "token_count": 20581 }
170
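The ConsisID pipeline above optionally rescales classifier-free guidance at every step when `use_dynamic_cfg=True`. The snippet below is a standalone sketch of that schedule, copied from the denoising loop; the timestep values are hypothetical stand-ins for `timesteps_cpu[i]`, and the base scale of 6.0 mirrors the pipeline's default `guidance_scale`.

```python
import math


def dynamic_guidance_scale(base_scale: float, t: float, num_inference_steps: int) -> float:
    # Same cosine-ramp formula applied inside ConsisIDPipeline.__call__ when use_dynamic_cfg=True.
    return 1 + base_scale * (
        (1 - math.cos(math.pi * ((num_inference_steps - t) / num_inference_steps) ** 5.0)) / 2
    )


# Hypothetical timestep values, just to show how the effective scale evolves over a 50-step run.
for t in (49, 25, 1):
    print(f"t={t:>2}  guidance={dynamic_guidance_scale(6.0, t, num_inference_steps=50):.3f}")
```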
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import warnings from functools import partial from typing import Dict, List, Optional, Union import jax import jax.numpy as jnp import numpy as np from flax.core.frozen_dict import FrozenDict from flax.jax_utils import unreplicate from flax.training.common_utils import shard from PIL import Image from transformers import CLIPImageProcessor, CLIPTokenizer, FlaxCLIPTextModel from ...models import FlaxAutoencoderKL, FlaxControlNetModel, FlaxUNet2DConditionModel from ...schedulers import ( FlaxDDIMScheduler, FlaxDPMSolverMultistepScheduler, FlaxLMSDiscreteScheduler, FlaxPNDMScheduler, ) from ...utils import PIL_INTERPOLATION, logging, replace_example_docstring from ..pipeline_flax_utils import FlaxDiffusionPipeline from ..stable_diffusion import FlaxStableDiffusionPipelineOutput from ..stable_diffusion.safety_checker_flax import FlaxStableDiffusionSafetyChecker logger = logging.get_logger(__name__) # pylint: disable=invalid-name # Set to True to use python for loop instead of jax.fori_loop for easier debugging DEBUG = False EXAMPLE_DOC_STRING = """ Examples: ```py >>> import jax >>> import numpy as np >>> import jax.numpy as jnp >>> from flax.jax_utils import replicate >>> from flax.training.common_utils import shard >>> from diffusers.utils import load_image, make_image_grid >>> from PIL import Image >>> from diffusers import FlaxStableDiffusionControlNetPipeline, FlaxControlNetModel >>> def create_key(seed=0): ... return jax.random.PRNGKey(seed) >>> rng = create_key(0) >>> # get canny image >>> canny_image = load_image( ... "https://huggingface.co/datasets/YiYiXu/test-doc-assets/resolve/main/blog_post_cell_10_output_0.jpeg" ... ) >>> prompts = "best quality, extremely detailed" >>> negative_prompts = "monochrome, lowres, bad anatomy, worst quality, low quality" >>> # load control net and stable diffusion v1-5 >>> controlnet, controlnet_params = FlaxControlNetModel.from_pretrained( ... "lllyasviel/sd-controlnet-canny", from_pt=True, dtype=jnp.float32 ... ) >>> pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained( ... "stable-diffusion-v1-5/stable-diffusion-v1-5", ... controlnet=controlnet, ... revision="flax", ... dtype=jnp.float32, ... ) >>> params["controlnet"] = controlnet_params >>> num_samples = jax.device_count() >>> rng = jax.random.split(rng, jax.device_count()) >>> prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples) >>> negative_prompt_ids = pipe.prepare_text_inputs([negative_prompts] * num_samples) >>> processed_image = pipe.prepare_image_inputs([canny_image] * num_samples) >>> p_params = replicate(params) >>> prompt_ids = shard(prompt_ids) >>> negative_prompt_ids = shard(negative_prompt_ids) >>> processed_image = shard(processed_image) >>> output = pipe( ... prompt_ids=prompt_ids, ... image=processed_image, ... params=p_params, ... prng_seed=rng, ... num_inference_steps=50, ... neg_prompt_ids=negative_prompt_ids, ... jit=True, ... 
).images >>> output_images = pipe.numpy_to_pil(np.asarray(output.reshape((num_samples,) + output.shape[-3:]))) >>> output_images = make_image_grid(output_images, num_samples // 4, 4) >>> output_images.save("generated_image.png") ``` """ class FlaxStableDiffusionControlNetPipeline(FlaxDiffusionPipeline): r""" Flax-based pipeline for text-to-image generation using Stable Diffusion with ControlNet Guidance. This model inherits from [`FlaxDiffusionPipeline`]. Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.). Args: vae ([`FlaxAutoencoderKL`]): Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. text_encoder ([`~transformers.FlaxCLIPTextModel`]): Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). tokenizer ([`~transformers.CLIPTokenizer`]): A `CLIPTokenizer` to tokenize text. unet ([`FlaxUNet2DConditionModel`]): A `FlaxUNet2DConditionModel` to denoise the encoded image latents. controlnet ([`FlaxControlNetModel`]: Provides additional conditioning to the `unet` during the denoising process. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of [`FlaxDDIMScheduler`], [`FlaxLMSDiscreteScheduler`], [`FlaxPNDMScheduler`], or [`FlaxDPMSolverMultistepScheduler`]. safety_checker ([`FlaxStableDiffusionSafetyChecker`]): Classification module that estimates whether generated images could be considered offensive or harmful. Please refer to the [model card](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5) for more details about a model's potential harms. feature_extractor ([`~transformers.CLIPImageProcessor`]): A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. """ def __init__( self, vae: FlaxAutoencoderKL, text_encoder: FlaxCLIPTextModel, tokenizer: CLIPTokenizer, unet: FlaxUNet2DConditionModel, controlnet: FlaxControlNetModel, scheduler: Union[ FlaxDDIMScheduler, FlaxPNDMScheduler, FlaxLMSDiscreteScheduler, FlaxDPMSolverMultistepScheduler ], safety_checker: FlaxStableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, dtype: jnp.dtype = jnp.float32, ): super().__init__() self.dtype = dtype if safety_checker is None: logger.warning( f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" " results in services or applications open to the public. Both the diffusers team and Hugging Face" " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" " it only for use-cases that involve analyzing network behavior or auditing its results. For more" " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." 
) self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, controlnet=controlnet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, ) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8 def prepare_text_inputs(self, prompt: Union[str, List[str]]): if not isinstance(prompt, (str, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") text_input = self.tokenizer( prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="np", ) return text_input.input_ids def prepare_image_inputs(self, image: Union[Image.Image, List[Image.Image]]): if not isinstance(image, (Image.Image, list)): raise ValueError(f"image has to be of type `PIL.Image.Image` or list but is {type(image)}") if isinstance(image, Image.Image): image = [image] processed_images = jnp.concatenate([preprocess(img, jnp.float32) for img in image]) return processed_images def _get_has_nsfw_concepts(self, features, params): has_nsfw_concepts = self.safety_checker(features, params) return has_nsfw_concepts def _run_safety_checker(self, images, safety_model_params, jit=False): # safety_model_params should already be replicated when jit is True pil_images = [Image.fromarray(image) for image in images] features = self.feature_extractor(pil_images, return_tensors="np").pixel_values if jit: features = shard(features) has_nsfw_concepts = _p_get_has_nsfw_concepts(self, features, safety_model_params) has_nsfw_concepts = unshard(has_nsfw_concepts) safety_model_params = unreplicate(safety_model_params) else: has_nsfw_concepts = self._get_has_nsfw_concepts(features, safety_model_params) images_was_copied = False for idx, has_nsfw_concept in enumerate(has_nsfw_concepts): if has_nsfw_concept: if not images_was_copied: images_was_copied = True images = images.copy() images[idx] = np.zeros(images[idx].shape, dtype=np.uint8) # black image if any(has_nsfw_concepts): warnings.warn( "Potential NSFW content was detected in one or more images. A black image will be returned" " instead. Try again with a different prompt and/or seed." 
) return images, has_nsfw_concepts def _generate( self, prompt_ids: jnp.ndarray, image: jnp.ndarray, params: Union[Dict, FrozenDict], prng_seed: jax.Array, num_inference_steps: int, guidance_scale: float, latents: Optional[jnp.ndarray] = None, neg_prompt_ids: Optional[jnp.ndarray] = None, controlnet_conditioning_scale: float = 1.0, ): height, width = image.shape[-2:] if height % 64 != 0 or width % 64 != 0: raise ValueError(f"`height` and `width` have to be divisible by 64 but are {height} and {width}.") # get prompt text embeddings prompt_embeds = self.text_encoder(prompt_ids, params=params["text_encoder"])[0] # TODO: currently it is assumed `do_classifier_free_guidance = guidance_scale > 1.0` # implement this conditional `do_classifier_free_guidance = guidance_scale > 1.0` batch_size = prompt_ids.shape[0] max_length = prompt_ids.shape[-1] if neg_prompt_ids is None: uncond_input = self.tokenizer( [""] * batch_size, padding="max_length", max_length=max_length, return_tensors="np" ).input_ids else: uncond_input = neg_prompt_ids negative_prompt_embeds = self.text_encoder(uncond_input, params=params["text_encoder"])[0] context = jnp.concatenate([negative_prompt_embeds, prompt_embeds]) image = jnp.concatenate([image] * 2) latents_shape = ( batch_size, self.unet.config.in_channels, height // self.vae_scale_factor, width // self.vae_scale_factor, ) if latents is None: latents = jax.random.normal(prng_seed, shape=latents_shape, dtype=jnp.float32) else: if latents.shape != latents_shape: raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}") def loop_body(step, args): latents, scheduler_state = args # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes latents_input = jnp.concatenate([latents] * 2) t = jnp.array(scheduler_state.timesteps, dtype=jnp.int32)[step] timestep = jnp.broadcast_to(t, latents_input.shape[0]) latents_input = self.scheduler.scale_model_input(scheduler_state, latents_input, t) down_block_res_samples, mid_block_res_sample = self.controlnet.apply( {"params": params["controlnet"]}, jnp.array(latents_input), jnp.array(timestep, dtype=jnp.int32), encoder_hidden_states=context, controlnet_cond=image, conditioning_scale=controlnet_conditioning_scale, return_dict=False, ) # predict the noise residual noise_pred = self.unet.apply( {"params": params["unet"]}, jnp.array(latents_input), jnp.array(timestep, dtype=jnp.int32), encoder_hidden_states=context, down_block_additional_residuals=down_block_res_samples, mid_block_additional_residual=mid_block_res_sample, ).sample # perform guidance noise_pred_uncond, noise_prediction_text = jnp.split(noise_pred, 2, axis=0) noise_pred = noise_pred_uncond + guidance_scale * (noise_prediction_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 latents, scheduler_state = self.scheduler.step(scheduler_state, noise_pred, t, latents).to_tuple() return latents, scheduler_state scheduler_state = self.scheduler.set_timesteps( params["scheduler"], num_inference_steps=num_inference_steps, shape=latents_shape ) # scale the initial noise by the standard deviation required by the scheduler latents = latents * params["scheduler"].init_noise_sigma if DEBUG: # run with python for loop for i in range(num_inference_steps): latents, scheduler_state = loop_body(i, (latents, scheduler_state)) else: latents, _ = jax.lax.fori_loop(0, num_inference_steps, loop_body, (latents, 
scheduler_state)) # scale and decode the image latents with vae latents = 1 / self.vae.config.scaling_factor * latents image = self.vae.apply({"params": params["vae"]}, latents, method=self.vae.decode).sample image = (image / 2 + 0.5).clip(0, 1).transpose(0, 2, 3, 1) return image @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__( self, prompt_ids: jnp.ndarray, image: jnp.ndarray, params: Union[Dict, FrozenDict], prng_seed: jax.Array, num_inference_steps: int = 50, guidance_scale: Union[float, jnp.ndarray] = 7.5, latents: jnp.ndarray = None, neg_prompt_ids: jnp.ndarray = None, controlnet_conditioning_scale: Union[float, jnp.ndarray] = 1.0, return_dict: bool = True, jit: bool = False, ): r""" The call function to the pipeline for generation. Args: prompt_ids (`jnp.ndarray`): The prompt or prompts to guide the image generation. image (`jnp.ndarray`): Array representing the ControlNet input condition to provide guidance to the `unet` for generation. params (`Dict` or `FrozenDict`): Dictionary containing the model parameters/weights. prng_seed (`jax.Array`): Array containing random number generator key. num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. guidance_scale (`float`, *optional*, defaults to 7.5): A higher guidance scale value encourages the model to generate images closely linked to the text `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. latents (`jnp.ndarray`, *optional*): Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents array is generated by sampling using the supplied random `generator`. controlnet_conditioning_scale (`float` or `jnp.ndarray`, *optional*, defaults to 1.0): The outputs of the ControlNet are multiplied by `controlnet_conditioning_scale` before they are added to the residual in the original `unet`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.stable_diffusion.FlaxStableDiffusionPipelineOutput`] instead of a plain tuple. jit (`bool`, defaults to `False`): Whether to run `pmap` versions of the generation and safety scoring functions. <Tip warning={true}> This argument exists because `__call__` is not yet end-to-end pmap-able. It will be removed in a future release. </Tip> Examples: Returns: [`~pipelines.stable_diffusion.FlaxStableDiffusionPipelineOutput`] or `tuple`: If `return_dict` is `True`, [`~pipelines.stable_diffusion.FlaxStableDiffusionPipelineOutput`] is returned, otherwise a `tuple` is returned where the first element is a list with the generated images and the second element is a list of `bool`s indicating whether the corresponding generated image contains "not-safe-for-work" (nsfw) content. """ height, width = image.shape[-2:] if isinstance(guidance_scale, float): # Convert to a tensor so each device gets a copy. Follow the prompt_ids for # shape information, as they may be sharded (when `jit` is `True`), or not. guidance_scale = jnp.array([guidance_scale] * prompt_ids.shape[0]) if len(prompt_ids.shape) > 2: # Assume sharded guidance_scale = guidance_scale[:, None] if isinstance(controlnet_conditioning_scale, float): # Convert to a tensor so each device gets a copy. Follow the prompt_ids for # shape information, as they may be sharded (when `jit` is `True`), or not. 
controlnet_conditioning_scale = jnp.array([controlnet_conditioning_scale] * prompt_ids.shape[0]) if len(prompt_ids.shape) > 2: # Assume sharded controlnet_conditioning_scale = controlnet_conditioning_scale[:, None] if jit: images = _p_generate( self, prompt_ids, image, params, prng_seed, num_inference_steps, guidance_scale, latents, neg_prompt_ids, controlnet_conditioning_scale, ) else: images = self._generate( prompt_ids, image, params, prng_seed, num_inference_steps, guidance_scale, latents, neg_prompt_ids, controlnet_conditioning_scale, ) if self.safety_checker is not None: safety_params = params["safety_checker"] images_uint8_casted = (images * 255).round().astype("uint8") num_devices, batch_size = images.shape[:2] images_uint8_casted = np.asarray(images_uint8_casted).reshape(num_devices * batch_size, height, width, 3) images_uint8_casted, has_nsfw_concept = self._run_safety_checker(images_uint8_casted, safety_params, jit) images = np.array(images) # block images if any(has_nsfw_concept): for i, is_nsfw in enumerate(has_nsfw_concept): if is_nsfw: images[i] = np.asarray(images_uint8_casted[i]) images = images.reshape(num_devices, batch_size, height, width, 3) else: images = np.asarray(images) has_nsfw_concept = False if not return_dict: return (images, has_nsfw_concept) return FlaxStableDiffusionPipelineOutput(images=images, nsfw_content_detected=has_nsfw_concept) # Static argnums are pipe, num_inference_steps. A change would trigger recompilation. # Non-static args are (sharded) input tensors mapped over their first dimension (hence, `0`). @partial( jax.pmap, in_axes=(None, 0, 0, 0, 0, None, 0, 0, 0, 0), static_broadcasted_argnums=(0, 5), ) def _p_generate( pipe, prompt_ids, image, params, prng_seed, num_inference_steps, guidance_scale, latents, neg_prompt_ids, controlnet_conditioning_scale, ): return pipe._generate( prompt_ids, image, params, prng_seed, num_inference_steps, guidance_scale, latents, neg_prompt_ids, controlnet_conditioning_scale, ) @partial(jax.pmap, static_broadcasted_argnums=(0,)) def _p_get_has_nsfw_concepts(pipe, features, params): return pipe._get_has_nsfw_concepts(features, params) def unshard(x: jnp.ndarray): # einops.rearrange(x, 'd b ... -> (d b) ...') num_devices, batch_size = x.shape[:2] rest = x.shape[2:] return x.reshape(num_devices * batch_size, *rest) def preprocess(image, dtype): image = image.convert("RGB") w, h = image.size w, h = (x - x % 64 for x in (w, h)) # resize to integer multiple of 64 image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]) image = jnp.array(image).astype(dtype) / 255.0 image = image[None].transpose(0, 3, 1, 2) return image
diffusers/src/diffusers/pipelines/controlnet/pipeline_flax_controlnet.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/controlnet/pipeline_flax_controlnet.py", "repo_id": "diffusers", "token_count": 10022 }
171
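The Flax ControlNet pipeline above shuttles tensors between a flat batch layout and a per-device layout: inputs go through `flax.training.common_utils.shard` before the `pmap`-ed `_p_generate`, and outputs come back through the `unshard` helper defined at the bottom of the file. A minimal sketch of that round trip (shapes and device count are illustrative):

```python
import jax
import jax.numpy as jnp
from flax.training.common_utils import shard

num_devices = jax.device_count()
prompt_ids = jnp.zeros((num_devices * 2, 77), dtype=jnp.int32)  # flat batch of tokenized prompts

sharded = shard(prompt_ids)  # -> (num_devices, 2, 77), one leading slice per device
print(sharded.shape)

# Equivalent of the `unshard` helper above: fold the device axis back into the batch axis.
flat = sharded.reshape((-1,) + sharded.shape[2:])
assert flat.shape == prompt_ids.shape
```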
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import List, Optional, Tuple, Union import torch from ...models import UNet1DModel from ...schedulers import SchedulerMixin from ...utils import is_torch_xla_available, logging from ...utils.torch_utils import randn_tensor from ..pipeline_utils import AudioPipelineOutput, DeprecatedPipelineMixin, DiffusionPipeline if is_torch_xla_available(): import torch_xla.core.xla_model as xm XLA_AVAILABLE = True else: XLA_AVAILABLE = False logger = logging.get_logger(__name__) # pylint: disable=invalid-name class DanceDiffusionPipeline(DeprecatedPipelineMixin, DiffusionPipeline): r""" Pipeline for audio generation. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.). Parameters: unet ([`UNet1DModel`]): A `UNet1DModel` to denoise the encoded audio. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded audio latents. Can be one of [`IPNDMScheduler`]. """ _last_supported_version = "0.33.1" model_cpu_offload_seq = "unet" def __init__(self, unet: UNet1DModel, scheduler: SchedulerMixin): super().__init__() self.register_modules(unet=unet, scheduler=scheduler) @torch.no_grad() def __call__( self, batch_size: int = 1, num_inference_steps: int = 100, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, audio_length_in_s: Optional[float] = None, return_dict: bool = True, ) -> Union[AudioPipelineOutput, Tuple]: r""" The call function to the pipeline for generation. Args: batch_size (`int`, *optional*, defaults to 1): The number of audio samples to generate. num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher-quality audio sample at the expense of slower inference. generator (`torch.Generator`, *optional*): A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. audio_length_in_s (`float`, *optional*, defaults to `self.unet.config.sample_size/self.unet.config.sample_rate`): The length of the generated audio sample in seconds. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.AudioPipelineOutput`] instead of a plain tuple. 
Example: ```py from diffusers import DiffusionPipeline from scipy.io.wavfile import write model_id = "harmonai/maestro-150k" pipe = DiffusionPipeline.from_pretrained(model_id) pipe = pipe.to("cuda") audios = pipe(audio_length_in_s=4.0).audios # To save locally for i, audio in enumerate(audios): write(f"maestro_test_{i}.wav", pipe.unet.sample_rate, audio.transpose()) # To display in google colab import IPython.display as ipd for audio in audios: display(ipd.Audio(audio, rate=pipe.unet.sample_rate)) ``` Returns: [`~pipelines.AudioPipelineOutput`] or `tuple`: If `return_dict` is `True`, [`~pipelines.AudioPipelineOutput`] is returned, otherwise a `tuple` is returned where the first element is a list with the generated audio. """ if audio_length_in_s is None: audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate sample_size = audio_length_in_s * self.unet.config.sample_rate down_scale_factor = 2 ** len(self.unet.up_blocks) if sample_size < 3 * down_scale_factor: raise ValueError( f"{audio_length_in_s} is too small. Make sure it's bigger or equal to" f" {3 * down_scale_factor / self.unet.config.sample_rate}." ) original_sample_size = int(sample_size) if sample_size % down_scale_factor != 0: sample_size = ( (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1 ) * down_scale_factor logger.info( f"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled" f" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising" " process." ) sample_size = int(sample_size) dtype = next(self.unet.parameters()).dtype shape = (batch_size, self.unet.config.in_channels, sample_size) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." ) audio = randn_tensor(shape, generator=generator, device=self._execution_device, dtype=dtype) # set step values self.scheduler.set_timesteps(num_inference_steps, device=audio.device) self.scheduler.timesteps = self.scheduler.timesteps.to(dtype) for t in self.progress_bar(self.scheduler.timesteps): # 1. predict noise model_output model_output = self.unet(audio, t).sample # 2. compute previous audio sample: x_t -> t_t-1 audio = self.scheduler.step(model_output, t, audio).prev_sample if XLA_AVAILABLE: xm.mark_step() audio = audio.clamp(-1, 1).float().cpu().numpy() audio = audio[:, :, :original_sample_size] if not return_dict: return (audio,) return AudioPipelineOutput(audios=audio)
diffusers/src/diffusers/pipelines/dance_diffusion/pipeline_dance_diffusion.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/dance_diffusion/pipeline_dance_diffusion.py", "repo_id": "diffusers", "token_count": 2803 }
172
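`DanceDiffusionPipeline.__call__` above converts the requested duration to a sample count, pads it up to a multiple of the UNet's total downsampling factor, and trims the waveform back after denoising. A standalone sketch of that bookkeeping, using hypothetical values in place of `unet.config` and `len(unet.up_blocks)`:

```python
# Hypothetical model properties; the pipeline reads these from unet.config and unet.up_blocks.
sample_rate = 44_100
num_up_blocks = 4
down_scale_factor = 2**num_up_blocks

audio_length_in_s = 3.0
sample_size = audio_length_in_s * sample_rate  # 132_300 samples requested
original_sample_size = int(sample_size)

if sample_size % down_scale_factor != 0:
    # Round up to the next multiple of the downsampling factor so the UNet can process it.
    sample_size = (int(audio_length_in_s * sample_rate) // down_scale_factor + 1) * down_scale_factor

print(int(sample_size), original_sample_size)  # 132_304 fed to the model, trimmed back to 132_300
```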
# Deprecated Pipelines

This folder contains pipelines that have very low usage as measured by model downloads, issues, and PRs. While you can still use these pipelines just as before, we will stop testing them and will not accept any changes to the existing files.
diffusers/src/diffusers/pipelines/deprecated/README.md/0
{ "file_path": "diffusers/src/diffusers/pipelines/deprecated/README.md", "repo_id": "diffusers", "token_count": 54 }
173
from typing import TYPE_CHECKING from ....utils import DIFFUSERS_SLOW_IMPORT, _LazyModule _import_structure = {"pipeline_score_sde_ve": ["ScoreSdeVePipeline"]} if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: from .pipeline_score_sde_ve import ScoreSdeVePipeline else: import sys sys.modules[__name__] = _LazyModule( __name__, globals()["__file__"], _import_structure, module_spec=__spec__, )
diffusers/src/diffusers/pipelines/deprecated/score_sde_ve/__init__.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/deprecated/score_sde_ve/__init__.py", "repo_id": "diffusers", "token_count": 193 }
174
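The `__init__.py` above registers `ScoreSdeVePipeline` through `_LazyModule`, so importing the subpackage is cheap and the real import only happens on first attribute access. A small sketch of that behaviour (assuming `diffusers` and PyTorch are installed):

```python
import importlib

# Importing the subpackage only installs the lazy placeholder built in the __init__ above.
mod = importlib.import_module("diffusers.pipelines.deprecated.score_sde_ve")
print(type(mod).__name__)  # _LazyModule rather than a plain module

# Accessing the attribute resolves "pipeline_score_sde_ve" and imports the actual class.
ScoreSdeVePipeline = mod.ScoreSdeVePipeline
print(ScoreSdeVePipeline.__name__)
```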
from typing import TYPE_CHECKING from ....utils import ( DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_transformers_available, is_transformers_version, ) _dummy_objects = {} _import_structure = {} try: if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ....utils.dummy_torch_and_transformers_objects import ( VersatileDiffusionDualGuidedPipeline, VersatileDiffusionImageVariationPipeline, VersatileDiffusionPipeline, VersatileDiffusionTextToImagePipeline, ) _dummy_objects.update( { "VersatileDiffusionDualGuidedPipeline": VersatileDiffusionDualGuidedPipeline, "VersatileDiffusionImageVariationPipeline": VersatileDiffusionImageVariationPipeline, "VersatileDiffusionPipeline": VersatileDiffusionPipeline, "VersatileDiffusionTextToImagePipeline": VersatileDiffusionTextToImagePipeline, } ) else: _import_structure["modeling_text_unet"] = ["UNetFlatConditionModel"] _import_structure["pipeline_versatile_diffusion"] = ["VersatileDiffusionPipeline"] _import_structure["pipeline_versatile_diffusion_dual_guided"] = ["VersatileDiffusionDualGuidedPipeline"] _import_structure["pipeline_versatile_diffusion_image_variation"] = ["VersatileDiffusionImageVariationPipeline"] _import_structure["pipeline_versatile_diffusion_text_to_image"] = ["VersatileDiffusionTextToImagePipeline"] if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: try: if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ....utils.dummy_torch_and_transformers_objects import ( VersatileDiffusionDualGuidedPipeline, VersatileDiffusionImageVariationPipeline, VersatileDiffusionPipeline, VersatileDiffusionTextToImagePipeline, ) else: from .pipeline_versatile_diffusion import VersatileDiffusionPipeline from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline else: import sys sys.modules[__name__] = _LazyModule( __name__, globals()["__file__"], _import_structure, module_spec=__spec__, ) for name, value in _dummy_objects.items(): setattr(sys.modules[__name__], name, value)
diffusers/src/diffusers/pipelines/deprecated/versatile_diffusion/__init__.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/deprecated/versatile_diffusion/__init__.py", "repo_id": "diffusers", "token_count": 1112 }
175
# Copyright 2025 Black Forest Labs and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from dataclasses import dataclass from typing import Optional import torch import torch.nn as nn from ...configuration_utils import ConfigMixin, register_to_config from ...models.modeling_utils import ModelMixin from ...utils import BaseOutput @dataclass class ReduxImageEncoderOutput(BaseOutput): image_embeds: Optional[torch.Tensor] = None class ReduxImageEncoder(ModelMixin, ConfigMixin): @register_to_config def __init__( self, redux_dim: int = 1152, txt_in_features: int = 4096, ) -> None: super().__init__() self.redux_up = nn.Linear(redux_dim, txt_in_features * 3) self.redux_down = nn.Linear(txt_in_features * 3, txt_in_features) def forward(self, x: torch.Tensor) -> ReduxImageEncoderOutput: projected_x = self.redux_down(nn.functional.silu(self.redux_up(x))) return ReduxImageEncoderOutput(image_embeds=projected_x)
diffusers/src/diffusers/pipelines/flux/modeling_flux.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/flux/modeling_flux.py", "repo_id": "diffusers", "token_count": 519 }
176
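`ReduxImageEncoder` above is a two-layer SiLU MLP that lifts image-encoder features (`redux_dim=1152`) into the text-embedding width (`txt_in_features=4096`) consumed by Flux. A quick shape check; the batch and token dimensions below are arbitrary stand-ins for whatever the upstream image encoder produces:

```python
import torch

from diffusers.pipelines.flux.modeling_flux import ReduxImageEncoder

encoder = ReduxImageEncoder()  # defaults: redux_dim=1152, txt_in_features=4096

image_features = torch.randn(1, 729, 1152)  # hypothetical (batch, tokens, redux_dim) features
out = encoder(image_features)
print(out.image_embeds.shape)  # torch.Size([1, 729, 4096])
```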
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Callable, Dict, List, Optional, Tuple, Union import torch import torch.nn as nn from ..models.attention import BasicTransformerBlock, FreeNoiseTransformerBlock from ..models.resnet import Downsample2D, ResnetBlock2D, Upsample2D from ..models.transformers.transformer_2d import Transformer2DModel from ..models.unets.unet_motion_model import ( AnimateDiffTransformer3D, CrossAttnDownBlockMotion, DownBlockMotion, UpBlockMotion, ) from ..pipelines.pipeline_utils import DiffusionPipeline from ..utils import logging from ..utils.torch_utils import randn_tensor logger = logging.get_logger(__name__) # pylint: disable=invalid-name class SplitInferenceModule(nn.Module): r""" A wrapper module class that splits inputs along a specified dimension before performing a forward pass. This module is useful when you need to perform inference on large tensors in a memory-efficient way by breaking them into smaller chunks, processing each chunk separately, and then reassembling the results. Args: module (`nn.Module`): The underlying PyTorch module that will be applied to each chunk of split inputs. split_size (`int`, defaults to `1`): The size of each chunk after splitting the input tensor. split_dim (`int`, defaults to `0`): The dimension along which the input tensors are split. input_kwargs_to_split (`List[str]`, defaults to `["hidden_states"]`): A list of keyword arguments (strings) that represent the input tensors to be split. Workflow: 1. The keyword arguments specified in `input_kwargs_to_split` are split into smaller chunks using `torch.split()` along the dimension `split_dim` and with a chunk size of `split_size`. 2. The `module` is invoked once for each split with both the split inputs and any unchanged arguments that were passed. 3. The output tensors from each split are concatenated back together along `split_dim` before returning. Example: ```python >>> import torch >>> import torch.nn as nn >>> model = nn.Linear(1000, 1000) >>> split_module = SplitInferenceModule(model, split_size=2, split_dim=0, input_kwargs_to_split=["input"]) >>> input_tensor = torch.randn(42, 1000) >>> # Will split the tensor into 21 slices of shape [2, 1000]. >>> output = split_module(input=input_tensor) ``` It is also possible to nest `SplitInferenceModule` across different split dimensions for more complex multi-dimensional splitting. """ def __init__( self, module: nn.Module, split_size: int = 1, split_dim: int = 0, input_kwargs_to_split: List[str] = ["hidden_states"], ) -> None: super().__init__() self.module = module self.split_size = split_size self.split_dim = split_dim self.input_kwargs_to_split = set(input_kwargs_to_split) def forward(self, *args, **kwargs) -> Union[torch.Tensor, Tuple[torch.Tensor]]: r"""Forward method for the `SplitInferenceModule`. 
This method processes the input by splitting specified keyword arguments along a given dimension, running the underlying module on each split, and then concatenating the results. The splitting is controlled by the `split_size` and `split_dim` parameters specified during initialization. Args: *args (`Any`): Positional arguments that are passed directly to the `module` without modification. **kwargs (`Dict[str, torch.Tensor]`): Keyword arguments passed to the underlying `module`. Only keyword arguments whose names match the entries in `input_kwargs_to_split` and are of type `torch.Tensor` will be split. The remaining keyword arguments are passed unchanged. Returns: `Union[torch.Tensor, Tuple[torch.Tensor]]`: The outputs obtained from `SplitInferenceModule` are the same as if the underlying module had been run on the full, unsplit inputs. - If the underlying module returns a single tensor, the result will be a single concatenated tensor along the same `split_dim` after processing all splits. - If the underlying module returns a tuple of tensors, each element of the tuple will be concatenated along the `split_dim` across all splits, and the final result will be a tuple of concatenated tensors. """ split_inputs = {} # 1. Split inputs that were specified during initialization and also present in passed kwargs for key in list(kwargs.keys()): if key not in self.input_kwargs_to_split or not torch.is_tensor(kwargs[key]): continue split_inputs[key] = torch.split(kwargs[key], self.split_size, self.split_dim) kwargs.pop(key) # 2. Invoke forward pass across each split results = [] for split_input in zip(*split_inputs.values()): inputs = dict(zip(split_inputs.keys(), split_input)) inputs.update(kwargs) intermediate_tensor_or_tensor_tuple = self.module(*args, **inputs) results.append(intermediate_tensor_or_tensor_tuple) # 3. Concatenate split results to obtain final outputs if isinstance(results[0], torch.Tensor): return torch.cat(results, dim=self.split_dim) elif isinstance(results[0], tuple): return tuple([torch.cat(x, dim=self.split_dim) for x in zip(*results)]) else: raise ValueError( "In order to use the SplitInferenceModule, it is necessary for the underlying `module` to either return a torch.Tensor or a tuple of torch.Tensors." 
) class AnimateDiffFreeNoiseMixin: r"""Mixin class for [FreeNoise](https://huggingface.co/papers/2310.15169).""" def _enable_free_noise_in_block(self, block: Union[CrossAttnDownBlockMotion, DownBlockMotion, UpBlockMotion]): r"""Helper function to enable FreeNoise in transformer blocks.""" for motion_module in block.motion_modules: num_transformer_blocks = len(motion_module.transformer_blocks) for i in range(num_transformer_blocks): if isinstance(motion_module.transformer_blocks[i], FreeNoiseTransformerBlock): motion_module.transformer_blocks[i].set_free_noise_properties( self._free_noise_context_length, self._free_noise_context_stride, self._free_noise_weighting_scheme, ) else: assert isinstance(motion_module.transformer_blocks[i], BasicTransformerBlock) basic_transfomer_block = motion_module.transformer_blocks[i] motion_module.transformer_blocks[i] = FreeNoiseTransformerBlock( dim=basic_transfomer_block.dim, num_attention_heads=basic_transfomer_block.num_attention_heads, attention_head_dim=basic_transfomer_block.attention_head_dim, dropout=basic_transfomer_block.dropout, cross_attention_dim=basic_transfomer_block.cross_attention_dim, activation_fn=basic_transfomer_block.activation_fn, attention_bias=basic_transfomer_block.attention_bias, only_cross_attention=basic_transfomer_block.only_cross_attention, double_self_attention=basic_transfomer_block.double_self_attention, positional_embeddings=basic_transfomer_block.positional_embeddings, num_positional_embeddings=basic_transfomer_block.num_positional_embeddings, context_length=self._free_noise_context_length, context_stride=self._free_noise_context_stride, weighting_scheme=self._free_noise_weighting_scheme, ).to(device=self.device, dtype=self.dtype) motion_module.transformer_blocks[i].load_state_dict( basic_transfomer_block.state_dict(), strict=True ) motion_module.transformer_blocks[i].set_chunk_feed_forward( basic_transfomer_block._chunk_size, basic_transfomer_block._chunk_dim ) def _disable_free_noise_in_block(self, block: Union[CrossAttnDownBlockMotion, DownBlockMotion, UpBlockMotion]): r"""Helper function to disable FreeNoise in transformer blocks.""" for motion_module in block.motion_modules: num_transformer_blocks = len(motion_module.transformer_blocks) for i in range(num_transformer_blocks): if isinstance(motion_module.transformer_blocks[i], FreeNoiseTransformerBlock): free_noise_transfomer_block = motion_module.transformer_blocks[i] motion_module.transformer_blocks[i] = BasicTransformerBlock( dim=free_noise_transfomer_block.dim, num_attention_heads=free_noise_transfomer_block.num_attention_heads, attention_head_dim=free_noise_transfomer_block.attention_head_dim, dropout=free_noise_transfomer_block.dropout, cross_attention_dim=free_noise_transfomer_block.cross_attention_dim, activation_fn=free_noise_transfomer_block.activation_fn, attention_bias=free_noise_transfomer_block.attention_bias, only_cross_attention=free_noise_transfomer_block.only_cross_attention, double_self_attention=free_noise_transfomer_block.double_self_attention, positional_embeddings=free_noise_transfomer_block.positional_embeddings, num_positional_embeddings=free_noise_transfomer_block.num_positional_embeddings, ).to(device=self.device, dtype=self.dtype) motion_module.transformer_blocks[i].load_state_dict( free_noise_transfomer_block.state_dict(), strict=True ) motion_module.transformer_blocks[i].set_chunk_feed_forward( free_noise_transfomer_block._chunk_size, free_noise_transfomer_block._chunk_dim ) def _check_inputs_free_noise( self, prompt, negative_prompt, 
prompt_embeds, negative_prompt_embeds, num_frames, ) -> None: if not isinstance(prompt, (str, dict)): raise ValueError(f"Expected `prompt` to have type `str` or `dict` but found {type(prompt)=}") if negative_prompt is not None: if not isinstance(negative_prompt, (str, dict)): raise ValueError( f"Expected `negative_prompt` to have type `str` or `dict` but found {type(negative_prompt)=}" ) if prompt_embeds is not None or negative_prompt_embeds is not None: raise ValueError("`prompt_embeds` and `negative_prompt_embeds` is not supported in FreeNoise yet.") frame_indices = [isinstance(x, int) for x in prompt.keys()] frame_prompts = [isinstance(x, str) for x in prompt.values()] min_frame = min(list(prompt.keys())) max_frame = max(list(prompt.keys())) if not all(frame_indices): raise ValueError("Expected integer keys in `prompt` dict for FreeNoise.") if not all(frame_prompts): raise ValueError("Expected str values in `prompt` dict for FreeNoise.") if min_frame != 0: raise ValueError("The minimum frame index in `prompt` dict must be 0 as a starting prompt is necessary.") if max_frame >= num_frames: raise ValueError( f"The maximum frame index in `prompt` dict must be lesser than {num_frames=} and follow 0-based indexing." ) def _encode_prompt_free_noise( self, prompt: Union[str, Dict[int, str]], num_frames: int, device: torch.device, num_videos_per_prompt: int, do_classifier_free_guidance: bool, negative_prompt: Optional[Union[str, Dict[int, str]]] = None, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, lora_scale: Optional[float] = None, clip_skip: Optional[int] = None, ) -> torch.Tensor: if negative_prompt is None: negative_prompt = "" # Ensure that we have a dictionary of prompts if isinstance(prompt, str): prompt = {0: prompt} if isinstance(negative_prompt, str): negative_prompt = {0: negative_prompt} self._check_inputs_free_noise(prompt, negative_prompt, prompt_embeds, negative_prompt_embeds, num_frames) # Sort the prompts based on frame indices prompt = dict(sorted(prompt.items())) negative_prompt = dict(sorted(negative_prompt.items())) # Ensure that we have a prompt for the last frame index prompt[num_frames - 1] = prompt[list(prompt.keys())[-1]] negative_prompt[num_frames - 1] = negative_prompt[list(negative_prompt.keys())[-1]] frame_indices = list(prompt.keys()) frame_prompts = list(prompt.values()) frame_negative_indices = list(negative_prompt.keys()) frame_negative_prompts = list(negative_prompt.values()) # Generate and interpolate positive prompts prompt_embeds, _ = self.encode_prompt( prompt=frame_prompts, device=device, num_images_per_prompt=num_videos_per_prompt, do_classifier_free_guidance=False, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None, lora_scale=lora_scale, clip_skip=clip_skip, ) shape = (num_frames, *prompt_embeds.shape[1:]) prompt_interpolation_embeds = prompt_embeds.new_zeros(shape) for i in range(len(frame_indices) - 1): start_frame = frame_indices[i] end_frame = frame_indices[i + 1] start_tensor = prompt_embeds[i].unsqueeze(0) end_tensor = prompt_embeds[i + 1].unsqueeze(0) prompt_interpolation_embeds[start_frame : end_frame + 1] = self._free_noise_prompt_interpolation_callback( start_frame, end_frame, start_tensor, end_tensor ) # Generate and interpolate negative prompts negative_prompt_embeds = None negative_prompt_interpolation_embeds = None if do_classifier_free_guidance: _, negative_prompt_embeds = self.encode_prompt( prompt=[""] * len(frame_negative_prompts), device=device, 
num_images_per_prompt=num_videos_per_prompt, do_classifier_free_guidance=True, negative_prompt=frame_negative_prompts, prompt_embeds=None, negative_prompt_embeds=None, lora_scale=lora_scale, clip_skip=clip_skip, ) negative_prompt_interpolation_embeds = negative_prompt_embeds.new_zeros(shape) for i in range(len(frame_negative_indices) - 1): start_frame = frame_negative_indices[i] end_frame = frame_negative_indices[i + 1] start_tensor = negative_prompt_embeds[i].unsqueeze(0) end_tensor = negative_prompt_embeds[i + 1].unsqueeze(0) negative_prompt_interpolation_embeds[start_frame : end_frame + 1] = ( self._free_noise_prompt_interpolation_callback(start_frame, end_frame, start_tensor, end_tensor) ) prompt_embeds = prompt_interpolation_embeds negative_prompt_embeds = negative_prompt_interpolation_embeds if do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) return prompt_embeds, negative_prompt_embeds def _prepare_latents_free_noise( self, batch_size: int, num_channels_latents: int, num_frames: int, height: int, width: int, dtype: torch.dtype, device: torch.device, generator: Optional[torch.Generator] = None, latents: Optional[torch.Tensor] = None, ): if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." ) context_num_frames = ( self._free_noise_context_length if self._free_noise_context_length == "repeat_context" else num_frames ) shape = ( batch_size, num_channels_latents, context_num_frames, height // self.vae_scale_factor, width // self.vae_scale_factor, ) if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) if self._free_noise_noise_type == "random": return latents else: if latents.size(2) == num_frames: return latents elif latents.size(2) != self._free_noise_context_length: raise ValueError( f"You have passed `latents` as a parameter to FreeNoise. 
The expected number of frames is either {num_frames} or {self._free_noise_context_length}, but found {latents.size(2)}" ) latents = latents.to(device) if self._free_noise_noise_type == "shuffle_context": for i in range(self._free_noise_context_length, num_frames, self._free_noise_context_stride): # ensure window is within bounds window_start = max(0, i - self._free_noise_context_length) window_end = min(num_frames, window_start + self._free_noise_context_stride) window_length = window_end - window_start if window_length == 0: break indices = torch.LongTensor(list(range(window_start, window_end))) shuffled_indices = indices[torch.randperm(window_length, generator=generator)] current_start = i current_end = min(num_frames, current_start + window_length) if current_end == current_start + window_length: # batch of frames perfectly fits the window latents[:, :, current_start:current_end] = latents[:, :, shuffled_indices] else: # handle the case where the last batch of frames does not fit perfectly with the window prefix_length = current_end - current_start shuffled_indices = shuffled_indices[:prefix_length] latents[:, :, current_start:current_end] = latents[:, :, shuffled_indices] elif self._free_noise_noise_type == "repeat_context": num_repeats = (num_frames + self._free_noise_context_length - 1) // self._free_noise_context_length latents = torch.cat([latents] * num_repeats, dim=2) latents = latents[:, :, :num_frames] return latents def _lerp( self, start_index: int, end_index: int, start_tensor: torch.Tensor, end_tensor: torch.Tensor ) -> torch.Tensor: num_indices = end_index - start_index + 1 interpolated_tensors = [] for i in range(num_indices): alpha = i / (num_indices - 1) interpolated_tensor = (1 - alpha) * start_tensor + alpha * end_tensor interpolated_tensors.append(interpolated_tensor) interpolated_tensors = torch.cat(interpolated_tensors) return interpolated_tensors def enable_free_noise( self, context_length: Optional[int] = 16, context_stride: int = 4, weighting_scheme: str = "pyramid", noise_type: str = "shuffle_context", prompt_interpolation_callback: Optional[ Callable[[DiffusionPipeline, int, int, torch.Tensor, torch.Tensor], torch.Tensor] ] = None, ) -> None: r""" Enable long video generation using FreeNoise. Args: context_length (`int`, defaults to `16`, *optional*): The number of video frames to process at once. It's recommended to set this to the maximum frames the Motion Adapter was trained with (usually 16/24/32). If `None`, the default value from the motion adapter config is used. context_stride (`int`, *optional*): Long videos are generated by processing many frames. FreeNoise processes these frames in sliding windows of size `context_length`. Context stride allows you to specify how many frames to skip between each window. For example, a context length of 16 and context stride of 4 would process 24 frames as: [0, 15], [4, 19], [8, 23] (0-based indexing) weighting_scheme (`str`, defaults to `pyramid`): Weighting scheme for averaging latents after accumulation in FreeNoise blocks. The following weighting schemes are supported currently: - "flat" Performs weighting averaging with a flat weight pattern: [1, 1, 1, 1, 1]. - "pyramid" Performs weighted averaging with a pyramid like weight pattern: [1, 2, 3, 2, 1]. - "delayed_reverse_sawtooth" Performs weighted averaging with low weights for earlier frames and high-to-low weights for later frames: [0.01, 0.01, 3, 2, 1]. 
noise_type (`str`, defaults to "shuffle_context"): Must be one of ["shuffle_context", "repeat_context", "random"]. - "shuffle_context" Shuffles a fixed batch of `context_length` latents to create a final latent of size `num_frames`. This is usually the best setting for most generation scenarios. However, there might be visible repetition noticeable in the kinds of motion/animation generated. - "repeat_context" Repeats a fixed batch of `context_length` latents to create a final latent of size `num_frames`. - "random" The final latents are random without any repetition. """ allowed_weighting_scheme = ["flat", "pyramid", "delayed_reverse_sawtooth"] allowed_noise_type = ["shuffle_context", "repeat_context", "random"] if context_length > self.motion_adapter.config.motion_max_seq_length: logger.warning( f"You have set {context_length=} which is greater than {self.motion_adapter.config.motion_max_seq_length=}. This can lead to bad generation results." ) if weighting_scheme not in allowed_weighting_scheme: raise ValueError( f"The parameter `weighting_scheme` must be one of {allowed_weighting_scheme}, but got {weighting_scheme=}" ) if noise_type not in allowed_noise_type: raise ValueError(f"The parameter `noise_type` must be one of {allowed_noise_type}, but got {noise_type=}") self._free_noise_context_length = context_length or self.motion_adapter.config.motion_max_seq_length self._free_noise_context_stride = context_stride self._free_noise_weighting_scheme = weighting_scheme self._free_noise_noise_type = noise_type self._free_noise_prompt_interpolation_callback = prompt_interpolation_callback or self._lerp if hasattr(self.unet.mid_block, "motion_modules"): blocks = [*self.unet.down_blocks, self.unet.mid_block, *self.unet.up_blocks] else: blocks = [*self.unet.down_blocks, *self.unet.up_blocks] for block in blocks: self._enable_free_noise_in_block(block) def disable_free_noise(self) -> None: r"""Disable the FreeNoise sampling mechanism.""" self._free_noise_context_length = None if hasattr(self.unet.mid_block, "motion_modules"): blocks = [*self.unet.down_blocks, self.unet.mid_block, *self.unet.up_blocks] else: blocks = [*self.unet.down_blocks, *self.unet.up_blocks] for block in blocks: self._disable_free_noise_in_block(block) def _enable_split_inference_motion_modules_( self, motion_modules: List[AnimateDiffTransformer3D], spatial_split_size: int ) -> None: for motion_module in motion_modules: motion_module.proj_in = SplitInferenceModule(motion_module.proj_in, spatial_split_size, 0, ["input"]) for i in range(len(motion_module.transformer_blocks)): motion_module.transformer_blocks[i] = SplitInferenceModule( motion_module.transformer_blocks[i], spatial_split_size, 0, ["hidden_states", "encoder_hidden_states"], ) motion_module.proj_out = SplitInferenceModule(motion_module.proj_out, spatial_split_size, 0, ["input"]) def _enable_split_inference_attentions_( self, attentions: List[Transformer2DModel], temporal_split_size: int ) -> None: for i in range(len(attentions)): attentions[i] = SplitInferenceModule( attentions[i], temporal_split_size, 0, ["hidden_states", "encoder_hidden_states"] ) def _enable_split_inference_resnets_(self, resnets: List[ResnetBlock2D], temporal_split_size: int) -> None: for i in range(len(resnets)): resnets[i] = SplitInferenceModule(resnets[i], temporal_split_size, 0, ["input_tensor", "temb"]) def _enable_split_inference_samplers_( self, samplers: Union[List[Downsample2D], List[Upsample2D]], 
temporal_split_size: int ) -> None: for i in range(len(samplers)): samplers[i] = SplitInferenceModule(samplers[i], temporal_split_size, 0, ["hidden_states"]) def enable_free_noise_split_inference(self, spatial_split_size: int = 256, temporal_split_size: int = 16) -> None: r""" Enable FreeNoise memory optimizations by utilizing [`~diffusers.pipelines.free_noise_utils.SplitInferenceModule`] across different intermediate modeling blocks. Args: spatial_split_size (`int`, defaults to `256`): The split size across spatial dimensions for internal blocks. This is used in facilitating split inference across the effective batch dimension (`[B x H x W, F, C]`) of intermediate tensors in motion modeling blocks. temporal_split_size (`int`, defaults to `16`): The split size across temporal dimensions for internal blocks. This is used in facilitating split inference across the effective batch dimension (`[B x F, H x W, C]`) of intermediate tensors in spatial attention, resnets, downsampling and upsampling blocks. """ # TODO(aryan): Discuss on what's the best way to provide more control to users blocks = [*self.unet.down_blocks, self.unet.mid_block, *self.unet.up_blocks] for block in blocks: if getattr(block, "motion_modules", None) is not None: self._enable_split_inference_motion_modules_(block.motion_modules, spatial_split_size) if getattr(block, "attentions", None) is not None: self._enable_split_inference_attentions_(block.attentions, temporal_split_size) if getattr(block, "resnets", None) is not None: self._enable_split_inference_resnets_(block.resnets, temporal_split_size) if getattr(block, "downsamplers", None) is not None: self._enable_split_inference_samplers_(block.downsamplers, temporal_split_size) if getattr(block, "upsamplers", None) is not None: self._enable_split_inference_samplers_(block.upsamplers, temporal_split_size) @property def free_noise_enabled(self): return hasattr(self, "_free_noise_context_length") and self._free_noise_context_length is not None
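# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the module above). It shows how the
# FreeNoise mixin and the split-inference helpers are typically driven from an
# AnimateDiff pipeline. The checkpoint names are assumptions borrowed from
# common AnimateDiff examples; substitute whichever motion adapter / base
# model pair you actually use.
def _example_free_noise_usage():
    import torch

    from diffusers import AnimateDiffPipeline, MotionAdapter

    adapter = MotionAdapter.from_pretrained(
        "guoyww/animatediff-motion-adapter-v1-5-2", torch_dtype=torch.float16
    )
    pipe = AnimateDiffPipeline.from_pretrained(
        "SG161222/Realistic_Vision_V5.1_noVAE", motion_adapter=adapter, torch_dtype=torch.float16
    ).to("cuda")

    # Process long videos in 16-frame windows with a stride of 4 and pyramid weighting.
    pipe.enable_free_noise(context_length=16, context_stride=4, weighting_scheme="pyramid")
    # Optionally wrap motion/attention/resnet blocks in SplitInferenceModule to reduce peak memory.
    pipe.enable_free_noise_split_inference(spatial_split_size=256, temporal_split_size=16)

    return pipe(
        "a panda surfing on a wave, high quality", num_frames=64, num_inference_steps=25
    ).frames[0]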
diffusers/src/diffusers/pipelines/free_noise_utils.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/free_noise_utils.py", "repo_id": "diffusers", "token_count": 13044 }
177
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Callable, List, Optional, Union import PIL.Image import torch from transformers import ( CLIPImageProcessor, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionModelWithProjection, XLMRobertaTokenizer, ) from ...models import PriorTransformer, UNet2DConditionModel, VQModel from ...schedulers import DDIMScheduler, DDPMScheduler, UnCLIPScheduler from ...utils import ( replace_example_docstring, ) from ..pipeline_utils import DiffusionPipeline from .pipeline_kandinsky import KandinskyPipeline from .pipeline_kandinsky_img2img import KandinskyImg2ImgPipeline from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline from .pipeline_kandinsky_prior import KandinskyPriorPipeline from .text_encoder import MultilingualCLIP TEXT2IMAGE_EXAMPLE_DOC_STRING = """ Examples: ```py from diffusers import AutoPipelineForText2Image import torch pipe = AutoPipelineForText2Image.from_pretrained( "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16 ) pipe.enable_model_cpu_offload() prompt = "A lion in galaxies, spirals, nebulae, stars, smoke, iridescent, intricate detail, octane render, 8k" image = pipe(prompt=prompt, num_inference_steps=25).images[0] ``` """ IMAGE2IMAGE_EXAMPLE_DOC_STRING = """ Examples: ```py from diffusers import AutoPipelineForImage2Image import torch import requests from io import BytesIO from PIL import Image import os pipe = AutoPipelineForImage2Image.from_pretrained( "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16 ) pipe.enable_model_cpu_offload() prompt = "A fantasy landscape, Cinematic lighting" negative_prompt = "low quality, bad quality" url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg" response = requests.get(url) original_image = Image.open(BytesIO(response.content)).convert("RGB") original_image.thumbnail((768, 768)) image = pipe(prompt=prompt, image=original_image, num_inference_steps=25).images[0] ``` """ INPAINT_EXAMPLE_DOC_STRING = """ Examples: ```py from diffusers import AutoPipelineForInpainting from diffusers.utils import load_image import torch import numpy as np pipe = AutoPipelineForInpainting.from_pretrained( "kandinsky-community/kandinsky-2-1-inpaint", torch_dtype=torch.float16 ) pipe.enable_model_cpu_offload() prompt = "A fantasy landscape, Cinematic lighting" negative_prompt = "low quality, bad quality" original_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" ) mask = np.zeros((768, 768), dtype=np.float32) # Let's mask out an area above the cat's head mask[:250, 250:-250] = 1 image = pipe(prompt=prompt, image=original_image, mask_image=mask, num_inference_steps=25).images[0] ``` """ class KandinskyCombinedPipeline(DiffusionPipeline): """ Combined Pipeline for text-to-image generation using Kandinsky This model inherits from [`DiffusionPipeline`]. 
Check the superclass documentation for the generic methods the library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) Args: text_encoder ([`MultilingualCLIP`]): Frozen text-encoder. tokenizer ([`XLMRobertaTokenizer`]): Tokenizer of class scheduler (Union[`DDIMScheduler`,`DDPMScheduler`]): A scheduler to be used in combination with `unet` to generate image latents. unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the image embedding. movq ([`VQModel`]): MoVQ Decoder to generate the image from the latents. prior_prior ([`PriorTransformer`]): The canonical unCLIP prior to approximate the image embedding from the text embedding. prior_image_encoder ([`CLIPVisionModelWithProjection`]): Frozen image-encoder. prior_text_encoder ([`CLIPTextModelWithProjection`]): Frozen text-encoder. prior_tokenizer (`CLIPTokenizer`): Tokenizer of class [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). prior_scheduler ([`UnCLIPScheduler`]): A scheduler to be used in combination with `prior` to generate image embedding. """ _load_connected_pipes = True model_cpu_offload_seq = "text_encoder->unet->movq->prior_prior->prior_image_encoder->prior_text_encoder" _exclude_from_cpu_offload = ["prior_prior"] def __init__( self, text_encoder: MultilingualCLIP, tokenizer: XLMRobertaTokenizer, unet: UNet2DConditionModel, scheduler: Union[DDIMScheduler, DDPMScheduler], movq: VQModel, prior_prior: PriorTransformer, prior_image_encoder: CLIPVisionModelWithProjection, prior_text_encoder: CLIPTextModelWithProjection, prior_tokenizer: CLIPTokenizer, prior_scheduler: UnCLIPScheduler, prior_image_processor: CLIPImageProcessor, ): super().__init__() self.register_modules( text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, movq=movq, prior_prior=prior_prior, prior_image_encoder=prior_image_encoder, prior_text_encoder=prior_text_encoder, prior_tokenizer=prior_tokenizer, prior_scheduler=prior_scheduler, prior_image_processor=prior_image_processor, ) self.prior_pipe = KandinskyPriorPipeline( prior=prior_prior, image_encoder=prior_image_encoder, text_encoder=prior_text_encoder, tokenizer=prior_tokenizer, scheduler=prior_scheduler, image_processor=prior_image_processor, ) self.decoder_pipe = KandinskyPipeline( text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, movq=movq, ) def enable_xformers_memory_efficient_attention(self, attention_op: Optional[Callable] = None): self.decoder_pipe.enable_xformers_memory_efficient_attention(attention_op) def enable_sequential_cpu_offload(self, gpu_id: Optional[int] = None, device: Union[torch.device, str] = None): r""" Offloads all models (`unet`, `text_encoder`, `vae`, and `safety checker` state dicts) to CPU using 🤗 Accelerate, significantly reducing memory usage. Models are moved to a `torch.device('meta')` and loaded on a GPU only when their specific submodule's `forward` method is called. Offloading happens on a submodule basis. Memory savings are higher than using `enable_model_cpu_offload`, but performance is lower. 
""" self.prior_pipe.enable_sequential_cpu_offload(gpu_id=gpu_id, device=device) self.decoder_pipe.enable_sequential_cpu_offload(gpu_id=gpu_id, device=device) def progress_bar(self, iterable=None, total=None): self.prior_pipe.progress_bar(iterable=iterable, total=total) self.decoder_pipe.progress_bar(iterable=iterable, total=total) self.decoder_pipe.enable_model_cpu_offload() def set_progress_bar_config(self, **kwargs): self.prior_pipe.set_progress_bar_config(**kwargs) self.decoder_pipe.set_progress_bar_config(**kwargs) @torch.no_grad() @replace_example_docstring(TEXT2IMAGE_EXAMPLE_DOC_STRING) def __call__( self, prompt: Union[str, List[str]], negative_prompt: Optional[Union[str, List[str]]] = None, num_inference_steps: int = 100, guidance_scale: float = 4.0, num_images_per_prompt: int = 1, height: int = 512, width: int = 512, prior_guidance_scale: float = 4.0, prior_num_inference_steps: int = 25, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.Tensor] = None, output_type: Optional[str] = "pil", callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, callback_steps: int = 1, return_dict: bool = True, ): """ Function invoked when calling the pipeline for generation. Args: prompt (`str` or `List[str]`): The prompt or prompts to guide the image generation. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. num_inference_steps (`int`, *optional*, defaults to 100): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. height (`int`, *optional*, defaults to 512): The height in pixels of the generated image. width (`int`, *optional*, defaults to 512): The width in pixels of the generated image. prior_guidance_scale (`float`, *optional*, defaults to 4.0): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://huggingface.co/papers/2207.12598). `guidance_scale` is defined as `w` of equation 2. of [Imagen Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. prior_num_inference_steps (`int`, *optional*, defaults to 100): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. guidance_scale (`float`, *optional*, defaults to 4.0): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://huggingface.co/papers/2207.12598). `guidance_scale` is defined as `w` of equation 2. of [Imagen Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. 
Can be used to tweak the same generation with different prompts. If not provided, a latents tensor will ge generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` (`np.array`) or `"pt"` (`torch.Tensor`). callback (`Callable`, *optional*): A function that calls every `callback_steps` steps during inference. The function is called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. callback_steps (`int`, *optional*, defaults to 1): The frequency at which the `callback` function is called. If not specified, the callback is called at every step. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. Examples: Returns: [`~pipelines.ImagePipelineOutput`] or `tuple` """ prior_outputs = self.prior_pipe( prompt=prompt, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, num_inference_steps=prior_num_inference_steps, generator=generator, latents=latents, guidance_scale=prior_guidance_scale, output_type="pt", return_dict=False, ) image_embeds = prior_outputs[0] negative_image_embeds = prior_outputs[1] prompt = [prompt] if not isinstance(prompt, (list, tuple)) else prompt if len(prompt) < image_embeds.shape[0] and image_embeds.shape[0] % len(prompt) == 0: prompt = (image_embeds.shape[0] // len(prompt)) * prompt outputs = self.decoder_pipe( prompt=prompt, image_embeds=image_embeds, negative_image_embeds=negative_image_embeds, width=width, height=height, num_inference_steps=num_inference_steps, generator=generator, guidance_scale=guidance_scale, output_type=output_type, callback=callback, callback_steps=callback_steps, return_dict=return_dict, ) self.maybe_free_model_hooks() return outputs class KandinskyImg2ImgCombinedPipeline(DiffusionPipeline): """ Combined Pipeline for image-to-image generation using Kandinsky This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) Args: text_encoder ([`MultilingualCLIP`]): Frozen text-encoder. tokenizer ([`XLMRobertaTokenizer`]): Tokenizer of class scheduler (Union[`DDIMScheduler`,`DDPMScheduler`]): A scheduler to be used in combination with `unet` to generate image latents. unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the image embedding. movq ([`VQModel`]): MoVQ Decoder to generate the image from the latents. prior_prior ([`PriorTransformer`]): The canonical unCLIP prior to approximate the image embedding from the text embedding. prior_image_encoder ([`CLIPVisionModelWithProjection`]): Frozen image-encoder. prior_text_encoder ([`CLIPTextModelWithProjection`]): Frozen text-encoder. prior_tokenizer (`CLIPTokenizer`): Tokenizer of class [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). prior_scheduler ([`UnCLIPScheduler`]): A scheduler to be used in combination with `prior` to generate image embedding. 
""" _load_connected_pipes = True model_cpu_offload_seq = "prior_text_encoder->prior_image_encoder->prior_prior->text_encoder->unet->movq" _exclude_from_cpu_offload = ["prior_prior"] def __init__( self, text_encoder: MultilingualCLIP, tokenizer: XLMRobertaTokenizer, unet: UNet2DConditionModel, scheduler: Union[DDIMScheduler, DDPMScheduler], movq: VQModel, prior_prior: PriorTransformer, prior_image_encoder: CLIPVisionModelWithProjection, prior_text_encoder: CLIPTextModelWithProjection, prior_tokenizer: CLIPTokenizer, prior_scheduler: UnCLIPScheduler, prior_image_processor: CLIPImageProcessor, ): super().__init__() self.register_modules( text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, movq=movq, prior_prior=prior_prior, prior_image_encoder=prior_image_encoder, prior_text_encoder=prior_text_encoder, prior_tokenizer=prior_tokenizer, prior_scheduler=prior_scheduler, prior_image_processor=prior_image_processor, ) self.prior_pipe = KandinskyPriorPipeline( prior=prior_prior, image_encoder=prior_image_encoder, text_encoder=prior_text_encoder, tokenizer=prior_tokenizer, scheduler=prior_scheduler, image_processor=prior_image_processor, ) self.decoder_pipe = KandinskyImg2ImgPipeline( text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, movq=movq, ) def enable_xformers_memory_efficient_attention(self, attention_op: Optional[Callable] = None): self.decoder_pipe.enable_xformers_memory_efficient_attention(attention_op) def enable_sequential_cpu_offload(self, gpu_id: Optional[int] = None, device: Union[torch.device, str] = None): r""" Offloads all models to CPU using accelerate, significantly reducing memory usage. When called, unet, text_encoder, vae and safety checker have their state dicts saved to CPU and then are moved to a `torch.device('meta') and loaded to GPU only when their specific submodule has its `forward` method called. Note that offloading happens on a submodule basis. Memory savings are higher than with `enable_model_cpu_offload`, but performance is lower. """ self.prior_pipe.enable_sequential_cpu_offload(gpu_id=gpu_id, device=device) self.decoder_pipe.enable_sequential_cpu_offload(gpu_id=gpu_id, device=device) def progress_bar(self, iterable=None, total=None): self.prior_pipe.progress_bar(iterable=iterable, total=total) self.decoder_pipe.progress_bar(iterable=iterable, total=total) self.decoder_pipe.enable_model_cpu_offload() def set_progress_bar_config(self, **kwargs): self.prior_pipe.set_progress_bar_config(**kwargs) self.decoder_pipe.set_progress_bar_config(**kwargs) @torch.no_grad() @replace_example_docstring(IMAGE2IMAGE_EXAMPLE_DOC_STRING) def __call__( self, prompt: Union[str, List[str]], image: Union[torch.Tensor, PIL.Image.Image, List[torch.Tensor], List[PIL.Image.Image]], negative_prompt: Optional[Union[str, List[str]]] = None, num_inference_steps: int = 100, guidance_scale: float = 4.0, num_images_per_prompt: int = 1, strength: float = 0.3, height: int = 512, width: int = 512, prior_guidance_scale: float = 4.0, prior_num_inference_steps: int = 25, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.Tensor] = None, output_type: Optional[str] = "pil", callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, callback_steps: int = 1, return_dict: bool = True, ): """ Function invoked when calling the pipeline for generation. Args: prompt (`str` or `List[str]`): The prompt or prompts to guide the image generation. 
image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`): `Image`, or tensor representing an image batch, that will be used as the starting point for the process. Can also accept image latents as `image`, if passing latents directly, it will not be encoded again. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. num_inference_steps (`int`, *optional*, defaults to 100): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. height (`int`, *optional*, defaults to 512): The height in pixels of the generated image. width (`int`, *optional*, defaults to 512): The width in pixels of the generated image. strength (`float`, *optional*, defaults to 0.3): Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1. `image` will be used as a starting point, adding more noise to it the larger the `strength`. The number of denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise will be maximum and the denoising process will run for the full number of iterations specified in `num_inference_steps`. A value of 1, therefore, essentially ignores `image`. prior_guidance_scale (`float`, *optional*, defaults to 4.0): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://huggingface.co/papers/2207.12598). `guidance_scale` is defined as `w` of equation 2. of [Imagen Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. prior_num_inference_steps (`int`, *optional*, defaults to 100): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. guidance_scale (`float`, *optional*, defaults to 4.0): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://huggingface.co/papers/2207.12598). `guidance_scale` is defined as `w` of equation 2. of [Imagen Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor will ge generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` (`np.array`) or `"pt"` (`torch.Tensor`). callback (`Callable`, *optional*): A function that calls every `callback_steps` steps during inference. 
The function is called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. callback_steps (`int`, *optional*, defaults to 1): The frequency at which the `callback` function is called. If not specified, the callback is called at every step. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. Examples: Returns: [`~pipelines.ImagePipelineOutput`] or `tuple` """ prior_outputs = self.prior_pipe( prompt=prompt, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, num_inference_steps=prior_num_inference_steps, generator=generator, latents=latents, guidance_scale=prior_guidance_scale, output_type="pt", return_dict=False, ) image_embeds = prior_outputs[0] negative_image_embeds = prior_outputs[1] prompt = [prompt] if not isinstance(prompt, (list, tuple)) else prompt image = [image] if isinstance(prompt, PIL.Image.Image) else image if len(prompt) < image_embeds.shape[0] and image_embeds.shape[0] % len(prompt) == 0: prompt = (image_embeds.shape[0] // len(prompt)) * prompt if ( isinstance(image, (list, tuple)) and len(image) < image_embeds.shape[0] and image_embeds.shape[0] % len(image) == 0 ): image = (image_embeds.shape[0] // len(image)) * image outputs = self.decoder_pipe( prompt=prompt, image=image, image_embeds=image_embeds, negative_image_embeds=negative_image_embeds, strength=strength, width=width, height=height, num_inference_steps=num_inference_steps, generator=generator, guidance_scale=guidance_scale, output_type=output_type, callback=callback, callback_steps=callback_steps, return_dict=return_dict, ) self.maybe_free_model_hooks() return outputs class KandinskyInpaintCombinedPipeline(DiffusionPipeline): """ Combined Pipeline for generation using Kandinsky This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) Args: text_encoder ([`MultilingualCLIP`]): Frozen text-encoder. tokenizer ([`XLMRobertaTokenizer`]): Tokenizer of class scheduler (Union[`DDIMScheduler`,`DDPMScheduler`]): A scheduler to be used in combination with `unet` to generate image latents. unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the image embedding. movq ([`VQModel`]): MoVQ Decoder to generate the image from the latents. prior_prior ([`PriorTransformer`]): The canonical unCLIP prior to approximate the image embedding from the text embedding. prior_image_encoder ([`CLIPVisionModelWithProjection`]): Frozen image-encoder. prior_text_encoder ([`CLIPTextModelWithProjection`]): Frozen text-encoder. prior_tokenizer (`CLIPTokenizer`): Tokenizer of class [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). prior_scheduler ([`UnCLIPScheduler`]): A scheduler to be used in combination with `prior` to generate image embedding. 
""" _load_connected_pipes = True model_cpu_offload_seq = "prior_text_encoder->prior_image_encoder->prior_prior->text_encoder->unet->movq" _exclude_from_cpu_offload = ["prior_prior"] def __init__( self, text_encoder: MultilingualCLIP, tokenizer: XLMRobertaTokenizer, unet: UNet2DConditionModel, scheduler: Union[DDIMScheduler, DDPMScheduler], movq: VQModel, prior_prior: PriorTransformer, prior_image_encoder: CLIPVisionModelWithProjection, prior_text_encoder: CLIPTextModelWithProjection, prior_tokenizer: CLIPTokenizer, prior_scheduler: UnCLIPScheduler, prior_image_processor: CLIPImageProcessor, ): super().__init__() self.register_modules( text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, movq=movq, prior_prior=prior_prior, prior_image_encoder=prior_image_encoder, prior_text_encoder=prior_text_encoder, prior_tokenizer=prior_tokenizer, prior_scheduler=prior_scheduler, prior_image_processor=prior_image_processor, ) self.prior_pipe = KandinskyPriorPipeline( prior=prior_prior, image_encoder=prior_image_encoder, text_encoder=prior_text_encoder, tokenizer=prior_tokenizer, scheduler=prior_scheduler, image_processor=prior_image_processor, ) self.decoder_pipe = KandinskyInpaintPipeline( text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, movq=movq, ) def enable_xformers_memory_efficient_attention(self, attention_op: Optional[Callable] = None): self.decoder_pipe.enable_xformers_memory_efficient_attention(attention_op) def enable_sequential_cpu_offload(self, gpu_id: Optional[int] = None, device: Union[torch.device, str] = None): r""" Offloads all models to CPU using accelerate, significantly reducing memory usage. When called, unet, text_encoder, vae and safety checker have their state dicts saved to CPU and then are moved to a `torch.device('meta') and loaded to GPU only when their specific submodule has its `forward` method called. Note that offloading happens on a submodule basis. Memory savings are higher than with `enable_model_cpu_offload`, but performance is lower. """ self.prior_pipe.enable_sequential_cpu_offload(gpu_id=gpu_id, device=device) self.decoder_pipe.enable_sequential_cpu_offload(gpu_id=gpu_id, device=device) def progress_bar(self, iterable=None, total=None): self.prior_pipe.progress_bar(iterable=iterable, total=total) self.decoder_pipe.progress_bar(iterable=iterable, total=total) self.decoder_pipe.enable_model_cpu_offload() def set_progress_bar_config(self, **kwargs): self.prior_pipe.set_progress_bar_config(**kwargs) self.decoder_pipe.set_progress_bar_config(**kwargs) @torch.no_grad() @replace_example_docstring(INPAINT_EXAMPLE_DOC_STRING) def __call__( self, prompt: Union[str, List[str]], image: Union[torch.Tensor, PIL.Image.Image, List[torch.Tensor], List[PIL.Image.Image]], mask_image: Union[torch.Tensor, PIL.Image.Image, List[torch.Tensor], List[PIL.Image.Image]], negative_prompt: Optional[Union[str, List[str]]] = None, num_inference_steps: int = 100, guidance_scale: float = 4.0, num_images_per_prompt: int = 1, height: int = 512, width: int = 512, prior_guidance_scale: float = 4.0, prior_num_inference_steps: int = 25, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.Tensor] = None, output_type: Optional[str] = "pil", callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, callback_steps: int = 1, return_dict: bool = True, ): """ Function invoked when calling the pipeline for generation. 
Args: prompt (`str` or `List[str]`): The prompt or prompts to guide the image generation. image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`): `Image`, or tensor representing an image batch, that will be used as the starting point for the process. Can also accept image latents as `image`, if passing latents directly, it will not be encoded again. mask_image (`np.array`): Tensor representing an image batch, to mask `image`. White pixels in the mask will be repainted, while black pixels will be preserved. If `mask_image` is a PIL image, it will be converted to a single channel (luminance) before use. If it's a tensor, it should contain one color channel (L) instead of 3, so the expected shape would be `(B, H, W, 1)`. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. num_inference_steps (`int`, *optional*, defaults to 100): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. height (`int`, *optional*, defaults to 512): The height in pixels of the generated image. width (`int`, *optional*, defaults to 512): The width in pixels of the generated image. prior_guidance_scale (`float`, *optional*, defaults to 4.0): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://huggingface.co/papers/2207.12598). `guidance_scale` is defined as `w` of equation 2. of [Imagen Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. prior_num_inference_steps (`int`, *optional*, defaults to 100): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. guidance_scale (`float`, *optional*, defaults to 4.0): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://huggingface.co/papers/2207.12598). `guidance_scale` is defined as `w` of equation 2. of [Imagen Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor will ge generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` (`np.array`) or `"pt"` (`torch.Tensor`). callback (`Callable`, *optional*): A function that calls every `callback_steps` steps during inference. The function is called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. 
callback_steps (`int`, *optional*, defaults to 1): The frequency at which the `callback` function is called. If not specified, the callback is called at every step. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. Examples: Returns: [`~pipelines.ImagePipelineOutput`] or `tuple` """ prior_outputs = self.prior_pipe( prompt=prompt, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, num_inference_steps=prior_num_inference_steps, generator=generator, latents=latents, guidance_scale=prior_guidance_scale, output_type="pt", return_dict=False, ) image_embeds = prior_outputs[0] negative_image_embeds = prior_outputs[1] prompt = [prompt] if not isinstance(prompt, (list, tuple)) else prompt image = [image] if isinstance(prompt, PIL.Image.Image) else image mask_image = [mask_image] if isinstance(mask_image, PIL.Image.Image) else mask_image if len(prompt) < image_embeds.shape[0] and image_embeds.shape[0] % len(prompt) == 0: prompt = (image_embeds.shape[0] // len(prompt)) * prompt if ( isinstance(image, (list, tuple)) and len(image) < image_embeds.shape[0] and image_embeds.shape[0] % len(image) == 0 ): image = (image_embeds.shape[0] // len(image)) * image if ( isinstance(mask_image, (list, tuple)) and len(mask_image) < image_embeds.shape[0] and image_embeds.shape[0] % len(mask_image) == 0 ): mask_image = (image_embeds.shape[0] // len(mask_image)) * mask_image outputs = self.decoder_pipe( prompt=prompt, image=image, mask_image=mask_image, image_embeds=image_embeds, negative_image_embeds=negative_image_embeds, width=width, height=height, num_inference_steps=num_inference_steps, generator=generator, guidance_scale=guidance_scale, output_type=output_type, callback=callback, callback_steps=callback_steps, return_dict=return_dict, ) self.maybe_free_model_hooks() return outputs
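# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the module above). The combined
# pipelines defined here are what the Auto* pipelines resolve to for Kandinsky
# 2.1 checkpoints; they can also be loaded directly, as sketched below. The
# checkpoint name matches the one used in the docstring examples and is assumed
# to expose the connected prior weights.
def _example_kandinsky_combined_usage():
    import torch

    from diffusers import KandinskyCombinedPipeline

    pipe = KandinskyCombinedPipeline.from_pretrained(
        "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16
    )
    pipe.enable_model_cpu_offload()

    # The prior runs first (prior_guidance_scale / prior_num_inference_steps) to produce
    # image embeddings, then the decoder turns those embeddings into pixels.
    image = pipe(
        prompt="A portrait of a red fox in autumn leaves, studio lighting",
        negative_prompt="low quality, blurry",
        prior_guidance_scale=4.0,
        prior_num_inference_steps=25,
        guidance_scale=4.0,
        num_inference_steps=100,
    ).images[0]
    return image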
diffusers/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_combined.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_combined.py", "repo_id": "diffusers", "token_count": 17026 }
178
from typing import Callable, Dict, List, Optional, Union import torch from transformers import T5EncoderModel, T5Tokenizer from ...loaders import StableDiffusionLoraLoaderMixin from ...models import Kandinsky3UNet, VQModel from ...schedulers import DDPMScheduler from ...utils import ( deprecate, is_torch_xla_available, logging, replace_example_docstring, ) from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput if is_torch_xla_available(): import torch_xla.core.xla_model as xm XLA_AVAILABLE = True else: XLA_AVAILABLE = False logger = logging.get_logger(__name__) # pylint: disable=invalid-name EXAMPLE_DOC_STRING = """ Examples: ```py >>> from diffusers import AutoPipelineForText2Image >>> import torch >>> pipe = AutoPipelineForText2Image.from_pretrained( ... "kandinsky-community/kandinsky-3", variant="fp16", torch_dtype=torch.float16 ... ) >>> pipe.enable_model_cpu_offload() >>> prompt = "A photograph of the inside of a subway train. There are raccoons sitting on the seats. One of them is reading a newspaper. The window shows the city in the background." >>> generator = torch.Generator(device="cpu").manual_seed(0) >>> image = pipe(prompt, num_inference_steps=25, generator=generator).images[0] ``` """ def downscale_height_and_width(height, width, scale_factor=8): new_height = height // scale_factor**2 if height % scale_factor**2 != 0: new_height += 1 new_width = width // scale_factor**2 if width % scale_factor**2 != 0: new_width += 1 return new_height * scale_factor, new_width * scale_factor class Kandinsky3Pipeline(DiffusionPipeline, StableDiffusionLoraLoaderMixin): model_cpu_offload_seq = "text_encoder->unet->movq" _callback_tensor_inputs = [ "latents", "prompt_embeds", "negative_prompt_embeds", "negative_attention_mask", "attention_mask", ] def __init__( self, tokenizer: T5Tokenizer, text_encoder: T5EncoderModel, unet: Kandinsky3UNet, scheduler: DDPMScheduler, movq: VQModel, ): super().__init__() self.register_modules( tokenizer=tokenizer, text_encoder=text_encoder, unet=unet, scheduler=scheduler, movq=movq ) def process_embeds(self, embeddings, attention_mask, cut_context): if cut_context: embeddings[attention_mask == 0] = torch.zeros_like(embeddings[attention_mask == 0]) max_seq_length = attention_mask.sum(-1).max() + 1 embeddings = embeddings[:, :max_seq_length] attention_mask = attention_mask[:, :max_seq_length] return embeddings, attention_mask @torch.no_grad() def encode_prompt( self, prompt, do_classifier_free_guidance=True, num_images_per_prompt=1, device=None, negative_prompt=None, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, _cut_context=False, attention_mask: Optional[torch.Tensor] = None, negative_attention_mask: Optional[torch.Tensor] = None, ): r""" Encodes the prompt into text encoder hidden states. Args: prompt (`str` or `List[str]`, *optional*): prompt to be encoded device: (`torch.device`, *optional*): torch device to place the resulting embeddings on num_images_per_prompt (`int`, *optional*, defaults to 1): number of images that should be generated per prompt do_classifier_free_guidance (`bool`, *optional*, defaults to `True`): whether to use classifier free guidance or not negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds`. instead. If not defined, one has to pass `negative_prompt_embeds`. instead. 
Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. attention_mask (`torch.Tensor`, *optional*): Pre-generated attention mask. Must provide if passing `prompt_embeds` directly. negative_attention_mask (`torch.Tensor`, *optional*): Pre-generated negative attention mask. Must provide if passing `negative_prompt_embeds` directly. """ if prompt is not None and negative_prompt is not None: if type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." ) if device is None: device = self._execution_device if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] max_length = 128 if prompt_embeds is None: text_inputs = self.tokenizer( prompt, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids.to(device) attention_mask = text_inputs.attention_mask.to(device) prompt_embeds = self.text_encoder( text_input_ids, attention_mask=attention_mask, ) prompt_embeds = prompt_embeds[0] prompt_embeds, attention_mask = self.process_embeds(prompt_embeds, attention_mask, _cut_context) prompt_embeds = prompt_embeds * attention_mask.unsqueeze(2) if self.text_encoder is not None: dtype = self.text_encoder.dtype else: dtype = None prompt_embeds = prompt_embeds.to(dtype=dtype, device=device) bs_embed, seq_len, _ = prompt_embeds.shape # duplicate text embeddings for each generation per prompt, using mps friendly method prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) attention_mask = attention_mask.repeat(num_images_per_prompt, 1) # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [""] * batch_size elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." 
) else: uncond_tokens = negative_prompt if negative_prompt is not None: uncond_input = self.tokenizer( uncond_tokens, padding="max_length", max_length=128, truncation=True, return_attention_mask=True, return_tensors="pt", ) text_input_ids = uncond_input.input_ids.to(device) negative_attention_mask = uncond_input.attention_mask.to(device) negative_prompt_embeds = self.text_encoder( text_input_ids, attention_mask=negative_attention_mask, ) negative_prompt_embeds = negative_prompt_embeds[0] negative_prompt_embeds = negative_prompt_embeds[:, : prompt_embeds.shape[1]] negative_attention_mask = negative_attention_mask[:, : prompt_embeds.shape[1]] negative_prompt_embeds = negative_prompt_embeds * negative_attention_mask.unsqueeze(2) else: negative_prompt_embeds = torch.zeros_like(prompt_embeds) negative_attention_mask = torch.zeros_like(attention_mask) if do_classifier_free_guidance: # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=dtype, device=device) if negative_prompt_embeds.shape != prompt_embeds.shape: negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) negative_attention_mask = negative_attention_mask.repeat(num_images_per_prompt, 1) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes else: negative_prompt_embeds = None negative_attention_mask = None return prompt_embeds, negative_prompt_embeds, attention_mask, negative_attention_mask def prepare_latents(self, shape, dtype, device, generator, latents, scheduler): if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: if latents.shape != shape: raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") latents = latents.to(device) latents = latents * scheduler.init_noise_sigma return latents def check_inputs( self, prompt, callback_steps, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None, callback_on_step_end_tensor_inputs=None, attention_mask=None, negative_attention_mask=None, ): if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." ) if callback_on_step_end_tensor_inputs is not None and not all( k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs ): raise ValueError( f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" ) if prompt is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt is None and prompt_embeds is None: raise ValueError( "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." 
) elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" f" {negative_prompt_embeds}. Please make sure to only forward one of the two." ) if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError( "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" f" {negative_prompt_embeds.shape}." ) if negative_prompt_embeds is not None and negative_attention_mask is None: raise ValueError("Please provide `negative_attention_mask` along with `negative_prompt_embeds`") if negative_prompt_embeds is not None and negative_attention_mask is not None: if negative_prompt_embeds.shape[:2] != negative_attention_mask.shape: raise ValueError( "`negative_prompt_embeds` and `negative_attention_mask` must have the same batch_size and token length when passed directly, but" f" got: `negative_prompt_embeds` {negative_prompt_embeds.shape[:2]} != `negative_attention_mask`" f" {negative_attention_mask.shape}." ) if prompt_embeds is not None and attention_mask is None: raise ValueError("Please provide `attention_mask` along with `prompt_embeds`") if prompt_embeds is not None and attention_mask is not None: if prompt_embeds.shape[:2] != attention_mask.shape: raise ValueError( "`prompt_embeds` and `attention_mask` must have the same batch_size and token length when passed directly, but" f" got: `prompt_embeds` {prompt_embeds.shape[:2]} != `attention_mask`" f" {attention_mask.shape}." ) @property def guidance_scale(self): return self._guidance_scale @property def do_classifier_free_guidance(self): return self._guidance_scale > 1 @property def num_timesteps(self): return self._num_timesteps @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__( self, prompt: Union[str, List[str]] = None, num_inference_steps: int = 25, guidance_scale: float = 3.0, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, height: Optional[int] = 1024, width: Optional[int] = 1024, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, negative_attention_mask: Optional[torch.Tensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, latents=None, callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, callback_on_step_end_tensor_inputs: List[str] = ["latents"], **kwargs, ): """ Function invoked when calling the pipeline for generation. Args: prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. instead. num_inference_steps (`int`, *optional*, defaults to 25): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. timesteps (`List[int]`, *optional*): Custom timesteps to use for the denoising process. If not defined, equal spaced `num_inference_steps` timesteps are used. Must be in descending order. 
guidance_scale (`float`, *optional*, defaults to 3.0): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://huggingface.co/papers/2207.12598). `guidance_scale` is defined as `w` of equation 2. of [Imagen Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. height (`int`, *optional*, defaults to self.unet.config.sample_size): The height in pixels of the generated image. width (`int`, *optional*, defaults to self.unet.config.sample_size): The width in pixels of the generated image. eta (`float`, *optional*, defaults to 0.0): Corresponds to parameter eta (η) in the DDIM paper: https://huggingface.co/papers/2010.02502. Only applies to [`schedulers.DDIMScheduler`], will be ignored for others. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. attention_mask (`torch.Tensor`, *optional*): Pre-generated attention mask. Must provide if passing `prompt_embeds` directly. negative_attention_mask (`torch.Tensor`, *optional*): Pre-generated negative attention mask. Must provide if passing `negative_prompt_embeds` directly. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.stable_diffusion.IFPipelineOutput`] instead of a plain tuple. callback (`Callable`, *optional*): A function that will be called every `callback_steps` steps during inference. The function will be called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. callback_steps (`int`, *optional*, defaults to 1): The frequency at which the `callback` function will be called. If not specified, the callback will be called at every step. clean_caption (`bool`, *optional*, defaults to `True`): Whether or not to clean the caption before creating embeddings. Requires `beautifulsoup4` and `ftfy` to be installed. If the dependencies are not installed, the embeddings will be created from the raw prompt. 
cross_attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under `self.processor` in [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). Examples: Returns: [`~pipelines.ImagePipelineOutput`] or `tuple` """ callback = kwargs.pop("callback", None) callback_steps = kwargs.pop("callback_steps", None) if callback is not None: deprecate( "callback", "1.0.0", "Passing `callback` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`", ) if callback_steps is not None: deprecate( "callback_steps", "1.0.0", "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`", ) if callback_on_step_end_tensor_inputs is not None and not all( k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs ): raise ValueError( f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" ) cut_context = True device = self._execution_device # 1. Check inputs. Raise error if not correct self.check_inputs( prompt, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds, callback_on_step_end_tensor_inputs, attention_mask, negative_attention_mask, ) self._guidance_scale = guidance_scale if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] # 3. Encode input prompt prompt_embeds, negative_prompt_embeds, attention_mask, negative_attention_mask = self.encode_prompt( prompt, self.do_classifier_free_guidance, num_images_per_prompt=num_images_per_prompt, device=device, negative_prompt=negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, _cut_context=cut_context, attention_mask=attention_mask, negative_attention_mask=negative_attention_mask, ) if self.do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) attention_mask = torch.cat([negative_attention_mask, attention_mask]).bool() # 4. Prepare timesteps self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps # 5. Prepare latents height, width = downscale_height_and_width(height, width, 8) latents = self.prepare_latents( (batch_size * num_images_per_prompt, 4, height, width), prompt_embeds.dtype, device, generator, latents, self.scheduler, ) if hasattr(self, "text_encoder_offload_hook") and self.text_encoder_offload_hook is not None: self.text_encoder_offload_hook.offload() # 7. 
Denoising loop num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order self._num_timesteps = len(timesteps) with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents # predict the noise residual noise_pred = self.unet( latent_model_input, t, encoder_hidden_states=prompt_embeds, encoder_attention_mask=attention_mask, return_dict=False, )[0] if self.do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = (guidance_scale + 1.0) * noise_pred_text - guidance_scale * noise_pred_uncond # noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step( noise_pred, t, latents, generator=generator, ).prev_sample if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop("latents", latents) prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) attention_mask = callback_outputs.pop("attention_mask", attention_mask) negative_attention_mask = callback_outputs.pop("negative_attention_mask", negative_attention_mask) if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, "order", 1) callback(step_idx, t, latents) if XLA_AVAILABLE: xm.mark_step() # post-processing if output_type not in ["pt", "np", "pil", "latent"]: raise ValueError( f"Only the output types `pt`, `pil`, `np` and `latent` are supported not output_type={output_type}" ) if not output_type == "latent": image = self.movq.decode(latents, force_not_quantize=True)["sample"] if output_type in ["np", "pil"]: image = image * 0.5 + 0.5 image = image.clamp(0, 1) image = image.cpu().permute(0, 2, 3, 1).float().numpy() if output_type == "pil": image = self.numpy_to_pil(image) else: image = latents self.maybe_free_model_hooks() if not return_dict: return (image,) return ImagePipelineOutput(images=image)
diffusers/src/diffusers/pipelines/kandinsky3/pipeline_kandinsky3.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/kandinsky3/pipeline_kandinsky3.py", "repo_id": "diffusers", "token_count": 12750 }
179
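The Kandinsky 3 pipeline above deprecates the legacy `callback`/`callback_steps` arguments in favour of `callback_on_step_end`. Below is a minimal sketch of the newer hook, reusing the checkpoint and settings from the pipeline's own example docstring; the callback body (logging the timestep) is purely illustrative, and the hook must return a dict of any tensors it wants to feed back into the loop.

```py
import torch
from diffusers import AutoPipelineForText2Image

pipe = AutoPipelineForText2Image.from_pretrained(
    "kandinsky-community/kandinsky-3", variant="fp16", torch_dtype=torch.float16
)
pipe.enable_model_cpu_offload()


def log_step(pipeline, step, timestep, callback_kwargs):
    # Tensors listed in `callback_on_step_end_tensor_inputs` are available here;
    # returning them (possibly modified) overrides the values used by the denoising loop.
    print(f"step {step}, timestep {int(timestep)}")
    return callback_kwargs


image = pipe(
    "A photograph of the inside of a subway train.",
    num_inference_steps=25,
    generator=torch.Generator(device="cpu").manual_seed(0),
    callback_on_step_end=log_step,
    callback_on_step_end_tensor_inputs=["latents"],
).images[0]
```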
# Copyright 2023-2025 Marigold Team, ETH Zürich. All rights reserved. # Copyright 2024-2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # -------------------------------------------------------------------------- # More information and citation instructions are available on the # Marigold project website: https://marigoldcomputervision.github.io # -------------------------------------------------------------------------- from typing import Any, Dict, List, Optional, Tuple, Union import numpy as np import PIL import torch import torch.nn.functional as F from PIL import Image from ... import ConfigMixin from ...configuration_utils import register_to_config from ...image_processor import PipelineImageInput from ...utils import CONFIG_NAME, logging from ...utils.import_utils import is_matplotlib_available logger = logging.get_logger(__name__) # pylint: disable=invalid-name class MarigoldImageProcessor(ConfigMixin): config_name = CONFIG_NAME @register_to_config def __init__( self, vae_scale_factor: int = 8, do_normalize: bool = True, do_range_check: bool = True, ): super().__init__() @staticmethod def expand_tensor_or_array(images: Union[torch.Tensor, np.ndarray]) -> Union[torch.Tensor, np.ndarray]: """ Expand a tensor or array to a specified number of images. """ if isinstance(images, np.ndarray): if images.ndim == 2: # [H,W] -> [1,H,W,1] images = images[None, ..., None] if images.ndim == 3: # [H,W,C] -> [1,H,W,C] images = images[None] elif isinstance(images, torch.Tensor): if images.ndim == 2: # [H,W] -> [1,1,H,W] images = images[None, None] elif images.ndim == 3: # [1,H,W] -> [1,1,H,W] images = images[None] else: raise ValueError(f"Unexpected input type: {type(images)}") return images @staticmethod def pt_to_numpy(images: torch.Tensor) -> np.ndarray: """ Convert a PyTorch tensor to a NumPy image. """ images = images.cpu().permute(0, 2, 3, 1).float().numpy() return images @staticmethod def numpy_to_pt(images: np.ndarray) -> torch.Tensor: """ Convert a NumPy image to a PyTorch tensor. 
""" if np.issubdtype(images.dtype, np.integer) and not np.issubdtype(images.dtype, np.unsignedinteger): raise ValueError(f"Input image dtype={images.dtype} cannot be a signed integer.") if np.issubdtype(images.dtype, np.complexfloating): raise ValueError(f"Input image dtype={images.dtype} cannot be complex.") if np.issubdtype(images.dtype, bool): raise ValueError(f"Input image dtype={images.dtype} cannot be boolean.") images = torch.from_numpy(images.transpose(0, 3, 1, 2)) return images @staticmethod def resize_antialias( image: torch.Tensor, size: Tuple[int, int], mode: str, is_aa: Optional[bool] = None ) -> torch.Tensor: if not torch.is_tensor(image): raise ValueError(f"Invalid input type={type(image)}.") if not torch.is_floating_point(image): raise ValueError(f"Invalid input dtype={image.dtype}.") if image.dim() != 4: raise ValueError(f"Invalid input dimensions; shape={image.shape}.") antialias = is_aa and mode in ("bilinear", "bicubic") image = F.interpolate(image, size, mode=mode, antialias=antialias) return image @staticmethod def resize_to_max_edge(image: torch.Tensor, max_edge_sz: int, mode: str) -> torch.Tensor: if not torch.is_tensor(image): raise ValueError(f"Invalid input type={type(image)}.") if not torch.is_floating_point(image): raise ValueError(f"Invalid input dtype={image.dtype}.") if image.dim() != 4: raise ValueError(f"Invalid input dimensions; shape={image.shape}.") h, w = image.shape[-2:] max_orig = max(h, w) new_h = h * max_edge_sz // max_orig new_w = w * max_edge_sz // max_orig if new_h == 0 or new_w == 0: raise ValueError(f"Extreme aspect ratio of the input image: [{w} x {h}]") image = MarigoldImageProcessor.resize_antialias(image, (new_h, new_w), mode, is_aa=True) return image @staticmethod def pad_image(image: torch.Tensor, align: int) -> Tuple[torch.Tensor, Tuple[int, int]]: if not torch.is_tensor(image): raise ValueError(f"Invalid input type={type(image)}.") if not torch.is_floating_point(image): raise ValueError(f"Invalid input dtype={image.dtype}.") if image.dim() != 4: raise ValueError(f"Invalid input dimensions; shape={image.shape}.") h, w = image.shape[-2:] ph, pw = -h % align, -w % align image = F.pad(image, (0, pw, 0, ph), mode="replicate") return image, (ph, pw) @staticmethod def unpad_image(image: torch.Tensor, padding: Tuple[int, int]) -> torch.Tensor: if not torch.is_tensor(image): raise ValueError(f"Invalid input type={type(image)}.") if not torch.is_floating_point(image): raise ValueError(f"Invalid input dtype={image.dtype}.") if image.dim() != 4: raise ValueError(f"Invalid input dimensions; shape={image.shape}.") ph, pw = padding uh = None if ph == 0 else -ph uw = None if pw == 0 else -pw image = image[:, :, :uh, :uw] return image @staticmethod def load_image_canonical( image: Union[torch.Tensor, np.ndarray, Image.Image], device: torch.device = torch.device("cpu"), dtype: torch.dtype = torch.float32, ) -> Tuple[torch.Tensor, int]: if isinstance(image, Image.Image): image = np.array(image) image_dtype_max = None if isinstance(image, (np.ndarray, torch.Tensor)): image = MarigoldImageProcessor.expand_tensor_or_array(image) if image.ndim != 4: raise ValueError("Input image is not 2-, 3-, or 4-dimensional.") if isinstance(image, np.ndarray): if np.issubdtype(image.dtype, np.integer) and not np.issubdtype(image.dtype, np.unsignedinteger): raise ValueError(f"Input image dtype={image.dtype} cannot be a signed integer.") if np.issubdtype(image.dtype, np.complexfloating): raise ValueError(f"Input image dtype={image.dtype} cannot be complex.") if 
np.issubdtype(image.dtype, bool): raise ValueError(f"Input image dtype={image.dtype} cannot be boolean.") if np.issubdtype(image.dtype, np.unsignedinteger): image_dtype_max = np.iinfo(image.dtype).max image = image.astype(np.float32) # because torch does not have unsigned dtypes beyond torch.uint8 image = MarigoldImageProcessor.numpy_to_pt(image) if torch.is_tensor(image) and not torch.is_floating_point(image) and image_dtype_max is None: if image.dtype != torch.uint8: raise ValueError(f"Image dtype={image.dtype} is not supported.") image_dtype_max = 255 if not torch.is_tensor(image): raise ValueError(f"Input type unsupported: {type(image)}.") if image.shape[1] == 1: image = image.repeat(1, 3, 1, 1) # [N,1,H,W] -> [N,3,H,W] if image.shape[1] != 3: raise ValueError(f"Input image is not 1- or 3-channel: {image.shape}.") image = image.to(device=device, dtype=dtype) if image_dtype_max is not None: image = image / image_dtype_max return image @staticmethod def check_image_values_range(image: torch.Tensor) -> None: if not torch.is_tensor(image): raise ValueError(f"Invalid input type={type(image)}.") if not torch.is_floating_point(image): raise ValueError(f"Invalid input dtype={image.dtype}.") if image.min().item() < 0.0 or image.max().item() > 1.0: raise ValueError("Input image data is partially outside of the [0,1] range.") def preprocess( self, image: PipelineImageInput, processing_resolution: Optional[int] = None, resample_method_input: str = "bilinear", device: torch.device = torch.device("cpu"), dtype: torch.dtype = torch.float32, ): if isinstance(image, list): images = None for i, img in enumerate(image): img = self.load_image_canonical(img, device, dtype) # [N,3,H,W] if images is None: images = img else: if images.shape[2:] != img.shape[2:]: raise ValueError( f"Input image[{i}] has incompatible dimensions {img.shape[2:]} with the previous images " f"{images.shape[2:]}" ) images = torch.cat((images, img), dim=0) image = images del images else: image = self.load_image_canonical(image, device, dtype) # [N,3,H,W] original_resolution = image.shape[2:] if self.config.do_range_check: self.check_image_values_range(image) if self.config.do_normalize: image = image * 2.0 - 1.0 if processing_resolution is not None and processing_resolution > 0: image = self.resize_to_max_edge(image, processing_resolution, resample_method_input) # [N,3,PH,PW] image, padding = self.pad_image(image, self.config.vae_scale_factor) # [N,3,PPH,PPW] return image, padding, original_resolution @staticmethod def colormap( image: Union[np.ndarray, torch.Tensor], cmap: str = "Spectral", bytes: bool = False, _force_method: Optional[str] = None, ) -> Union[np.ndarray, torch.Tensor]: """ Converts a monochrome image into an RGB image by applying the specified colormap. This function mimics the behavior of matplotlib.colormaps, but allows the user to use the most discriminative color maps ("Spectral", "binary") without having to install or import matplotlib. For all other cases, the function will attempt to use the native implementation. Args: image: 2D tensor of values between 0 and 1, either as np.ndarray or torch.Tensor. cmap: Colormap name. bytes: Whether to return the output as uint8 or floating point image. _force_method: Can be used to specify whether to use the native implementation (`"matplotlib"`), the efficient custom implementation of the select color maps (`"custom"`), or rely on autodetection (`None`, default). Returns: An RGB-colorized tensor corresponding to the input image. 
""" if not (torch.is_tensor(image) or isinstance(image, np.ndarray)): raise ValueError("Argument must be a numpy array or torch tensor.") if _force_method not in (None, "matplotlib", "custom"): raise ValueError("_force_method must be either `None`, `'matplotlib'` or `'custom'`.") supported_cmaps = { "binary": [ (1.0, 1.0, 1.0), (0.0, 0.0, 0.0), ], "Spectral": [ # Taken from matplotlib/_cm.py (0.61960784313725492, 0.003921568627450980, 0.25882352941176473), # 0.0 -> [0] (0.83529411764705885, 0.24313725490196078, 0.30980392156862746), (0.95686274509803926, 0.42745098039215684, 0.2627450980392157), (0.99215686274509807, 0.68235294117647061, 0.38039215686274508), (0.99607843137254903, 0.8784313725490196, 0.54509803921568623), (1.0, 1.0, 0.74901960784313726), (0.90196078431372551, 0.96078431372549022, 0.59607843137254901), (0.6705882352941176, 0.8666666666666667, 0.64313725490196083), (0.4, 0.76078431372549016, 0.6470588235294118), (0.19607843137254902, 0.53333333333333333, 0.74117647058823533), (0.36862745098039218, 0.30980392156862746, 0.63529411764705879), # 1.0 -> [K-1] ], } def method_matplotlib(image, cmap, bytes=False): if is_matplotlib_available(): import matplotlib else: return None arg_is_pt, device = torch.is_tensor(image), None if arg_is_pt: image, device = image.cpu().numpy(), image.device if cmap not in matplotlib.colormaps: raise ValueError( f"Unexpected color map {cmap}; available options are: {', '.join(list(matplotlib.colormaps.keys()))}" ) cmap = matplotlib.colormaps[cmap] out = cmap(image, bytes=bytes) # [?,4] out = out[..., :3] # [?,3] if arg_is_pt: out = torch.tensor(out, device=device) return out def method_custom(image, cmap, bytes=False): arg_is_np = isinstance(image, np.ndarray) if arg_is_np: image = torch.tensor(image) if image.dtype == torch.uint8: image = image.float() / 255 else: image = image.float() is_cmap_reversed = cmap.endswith("_r") if is_cmap_reversed: cmap = cmap[:-2] if cmap not in supported_cmaps: raise ValueError( f"Only {list(supported_cmaps.keys())} color maps are available without installing matplotlib." ) cmap = supported_cmaps[cmap] if is_cmap_reversed: cmap = cmap[::-1] cmap = torch.tensor(cmap, dtype=torch.float, device=image.device) # [K,3] K = cmap.shape[0] pos = image.clamp(min=0, max=1) * (K - 1) left = pos.long() right = (left + 1).clamp(max=K - 1) d = (pos - left.float()).unsqueeze(-1) left_colors = cmap[left] right_colors = cmap[right] out = (1 - d) * left_colors + d * right_colors if bytes: out = (out * 255).to(torch.uint8) if arg_is_np: out = out.numpy() return out if _force_method is None and torch.is_tensor(image) and cmap == "Spectral": return method_custom(image, cmap, bytes) out = None if _force_method != "custom": out = method_matplotlib(image, cmap, bytes) if _force_method == "matplotlib" and out is None: raise ImportError("Make sure to install matplotlib if you want to use a color map other than 'Spectral'.") if out is None: out = method_custom(image, cmap, bytes) return out @staticmethod def visualize_depth( depth: Union[ PIL.Image.Image, np.ndarray, torch.Tensor, List[PIL.Image.Image], List[np.ndarray], List[torch.Tensor], ], val_min: float = 0.0, val_max: float = 1.0, color_map: str = "Spectral", ) -> List[PIL.Image.Image]: """ Visualizes depth maps, such as predictions of the `MarigoldDepthPipeline`. Args: depth (`Union[PIL.Image.Image, np.ndarray, torch.Tensor, List[PIL.Image.Image], List[np.ndarray], List[torch.Tensor]]`): Depth maps. 
val_min (`float`, *optional*, defaults to `0.0`): Minimum value of the visualized depth range. val_max (`float`, *optional*, defaults to `1.0`): Maximum value of the visualized depth range. color_map (`str`, *optional*, defaults to `"Spectral"`): Color map used to convert a single-channel depth prediction into colored representation. Returns: `List[PIL.Image.Image]` with depth maps visualization. """ if val_max <= val_min: raise ValueError(f"Invalid values range: [{val_min}, {val_max}].") def visualize_depth_one(img, idx=None): prefix = "Depth" + (f"[{idx}]" if idx else "") if isinstance(img, PIL.Image.Image): if img.mode != "I;16": raise ValueError(f"{prefix}: invalid PIL mode={img.mode}.") img = np.array(img).astype(np.float32) / (2**16 - 1) if isinstance(img, np.ndarray) or torch.is_tensor(img): if img.ndim != 2: raise ValueError(f"{prefix}: unexpected shape={img.shape}.") if isinstance(img, np.ndarray): img = torch.from_numpy(img) if not torch.is_floating_point(img): raise ValueError(f"{prefix}: unexpected dtype={img.dtype}.") else: raise ValueError(f"{prefix}: unexpected type={type(img)}.") if val_min != 0.0 or val_max != 1.0: img = (img - val_min) / (val_max - val_min) img = MarigoldImageProcessor.colormap(img, cmap=color_map, bytes=True) # [H,W,3] img = PIL.Image.fromarray(img.cpu().numpy()) return img if depth is None or isinstance(depth, list) and any(o is None for o in depth): raise ValueError("Input depth is `None`") if isinstance(depth, (np.ndarray, torch.Tensor)): depth = MarigoldImageProcessor.expand_tensor_or_array(depth) if isinstance(depth, np.ndarray): depth = MarigoldImageProcessor.numpy_to_pt(depth) # [N,H,W,1] -> [N,1,H,W] if not (depth.ndim == 4 and depth.shape[1] == 1): # [N,1,H,W] raise ValueError(f"Unexpected input shape={depth.shape}, expecting [N,1,H,W].") return [visualize_depth_one(img[0], idx) for idx, img in enumerate(depth)] elif isinstance(depth, list): return [visualize_depth_one(img, idx) for idx, img in enumerate(depth)] else: raise ValueError(f"Unexpected input type: {type(depth)}") @staticmethod def export_depth_to_16bit_png( depth: Union[np.ndarray, torch.Tensor, List[np.ndarray], List[torch.Tensor]], val_min: float = 0.0, val_max: float = 1.0, ) -> List[PIL.Image.Image]: def export_depth_to_16bit_png_one(img, idx=None): prefix = "Depth" + (f"[{idx}]" if idx else "") if not isinstance(img, np.ndarray) and not torch.is_tensor(img): raise ValueError(f"{prefix}: unexpected type={type(img)}.") if img.ndim != 2: raise ValueError(f"{prefix}: unexpected shape={img.shape}.") if torch.is_tensor(img): img = img.cpu().numpy() if not np.issubdtype(img.dtype, np.floating): raise ValueError(f"{prefix}: unexpected dtype={img.dtype}.") if val_min != 0.0 or val_max != 1.0: img = (img - val_min) / (val_max - val_min) img = (img * (2**16 - 1)).astype(np.uint16) img = PIL.Image.fromarray(img, mode="I;16") return img if depth is None or isinstance(depth, list) and any(o is None for o in depth): raise ValueError("Input depth is `None`") if isinstance(depth, (np.ndarray, torch.Tensor)): depth = MarigoldImageProcessor.expand_tensor_or_array(depth) if isinstance(depth, np.ndarray): depth = MarigoldImageProcessor.numpy_to_pt(depth) # [N,H,W,1] -> [N,1,H,W] if not (depth.ndim == 4 and depth.shape[1] == 1): raise ValueError(f"Unexpected input shape={depth.shape}, expecting [N,1,H,W].") return [export_depth_to_16bit_png_one(img[0], idx) for idx, img in enumerate(depth)] elif isinstance(depth, list): return [export_depth_to_16bit_png_one(img, idx) for idx, img in enumerate(depth)] 
else: raise ValueError(f"Unexpected input type: {type(depth)}") @staticmethod def visualize_normals( normals: Union[ np.ndarray, torch.Tensor, List[np.ndarray], List[torch.Tensor], ], flip_x: bool = False, flip_y: bool = False, flip_z: bool = False, ) -> List[PIL.Image.Image]: """ Visualizes surface normals, such as predictions of the `MarigoldNormalsPipeline`. Args: normals (`Union[np.ndarray, torch.Tensor, List[np.ndarray], List[torch.Tensor]]`): Surface normals. flip_x (`bool`, *optional*, defaults to `False`): Flips the X axis of the normals frame of reference. Default direction is right. flip_y (`bool`, *optional*, defaults to `False`): Flips the Y axis of the normals frame of reference. Default direction is top. flip_z (`bool`, *optional*, defaults to `False`): Flips the Z axis of the normals frame of reference. Default direction is facing the observer. Returns: `List[PIL.Image.Image]` with surface normals visualization. """ flip_vec = None if any((flip_x, flip_y, flip_z)): flip_vec = torch.tensor( [ (-1) ** flip_x, (-1) ** flip_y, (-1) ** flip_z, ], dtype=torch.float32, ) def visualize_normals_one(img, idx=None): img = img.permute(1, 2, 0) if flip_vec is not None: img *= flip_vec.to(img.device) img = (img + 1.0) * 0.5 img = (img * 255).to(dtype=torch.uint8, device="cpu").numpy() img = PIL.Image.fromarray(img) return img if normals is None or isinstance(normals, list) and any(o is None for o in normals): raise ValueError("Input normals is `None`") if isinstance(normals, (np.ndarray, torch.Tensor)): normals = MarigoldImageProcessor.expand_tensor_or_array(normals) if isinstance(normals, np.ndarray): normals = MarigoldImageProcessor.numpy_to_pt(normals) # [N,3,H,W] if not (normals.ndim == 4 and normals.shape[1] == 3): raise ValueError(f"Unexpected input shape={normals.shape}, expecting [N,3,H,W].") return [visualize_normals_one(img, idx) for idx, img in enumerate(normals)] elif isinstance(normals, list): return [visualize_normals_one(img, idx) for idx, img in enumerate(normals)] else: raise ValueError(f"Unexpected input type: {type(normals)}") @staticmethod def visualize_intrinsics( prediction: Union[ np.ndarray, torch.Tensor, List[np.ndarray], List[torch.Tensor], ], target_properties: Dict[str, Any], color_map: Union[str, Dict[str, str]] = "binary", ) -> List[Dict[str, PIL.Image.Image]]: """ Visualizes intrinsic image decomposition, such as predictions of the `MarigoldIntrinsicsPipeline`. Args: prediction (`Union[np.ndarray, torch.Tensor, List[np.ndarray], List[torch.Tensor]]`): Intrinsic image decomposition. target_properties (`Dict[str, Any]`): Decomposition properties. Expected entries: `target_names: List[str]` and a dictionary with keys `prediction_space: str`, `sub_target_names: List[Union[str, Null]]` (must have 3 entries, null for missing modalities), `up_to_scale: bool`, one for each target and sub-target. color_map (`Union[str, Dict[str, str]]`, *optional*, defaults to `"Spectral"`): Color map used to convert a single-channel predictions into colored representations. When a dictionary is passed, each modality can be colored with its own color map. Returns: `List[Dict[str, PIL.Image.Image]]` with intrinsic image decomposition visualization. 
""" if "target_names" not in target_properties: raise ValueError("Missing `target_names` in target_properties") if not isinstance(color_map, str) and not ( isinstance(color_map, dict) and all(isinstance(k, str) and isinstance(v, str) for k, v in color_map.items()) ): raise ValueError("`color_map` must be a string or a dictionary of strings") n_targets = len(target_properties["target_names"]) def visualize_targets_one(images, idx=None): # img: [T, 3, H, W] out = {} for target_name, img in zip(target_properties["target_names"], images): img = img.permute(1, 2, 0) # [H, W, 3] prediction_space = target_properties[target_name].get("prediction_space", "srgb") if prediction_space == "stack": sub_target_names = target_properties[target_name]["sub_target_names"] if len(sub_target_names) != 3 or any( not (isinstance(s, str) or s is None) for s in sub_target_names ): raise ValueError(f"Unexpected target sub-names {sub_target_names} in {target_name}") for i, sub_target_name in enumerate(sub_target_names): if sub_target_name is None: continue sub_img = img[:, :, i] sub_prediction_space = target_properties[sub_target_name].get("prediction_space", "srgb") if sub_prediction_space == "linear": sub_up_to_scale = target_properties[sub_target_name].get("up_to_scale", False) if sub_up_to_scale: sub_img = sub_img / max(sub_img.max().item(), 1e-6) sub_img = sub_img ** (1 / 2.2) cmap_name = ( color_map if isinstance(color_map, str) else color_map.get(sub_target_name, "binary") ) sub_img = MarigoldImageProcessor.colormap(sub_img, cmap=cmap_name, bytes=True) sub_img = PIL.Image.fromarray(sub_img.cpu().numpy()) out[sub_target_name] = sub_img elif prediction_space == "linear": up_to_scale = target_properties[target_name].get("up_to_scale", False) if up_to_scale: img = img / max(img.max().item(), 1e-6) img = img ** (1 / 2.2) elif prediction_space == "srgb": pass img = (img * 255).to(dtype=torch.uint8, device="cpu").numpy() img = PIL.Image.fromarray(img) out[target_name] = img return out if prediction is None or isinstance(prediction, list) and any(o is None for o in prediction): raise ValueError("Input prediction is `None`") if isinstance(prediction, (np.ndarray, torch.Tensor)): prediction = MarigoldImageProcessor.expand_tensor_or_array(prediction) if isinstance(prediction, np.ndarray): prediction = MarigoldImageProcessor.numpy_to_pt(prediction) # [N*T,3,H,W] if not (prediction.ndim == 4 and prediction.shape[1] == 3 and prediction.shape[0] % n_targets == 0): raise ValueError(f"Unexpected input shape={prediction.shape}, expecting [N*T,3,H,W].") N_T, _, H, W = prediction.shape N = N_T // n_targets prediction = prediction.reshape(N, n_targets, 3, H, W) return [visualize_targets_one(img, idx) for idx, img in enumerate(prediction)] elif isinstance(prediction, list): return [visualize_targets_one(img, idx) for idx, img in enumerate(prediction)] else: raise ValueError(f"Unexpected input type: {type(prediction)}") @staticmethod def visualize_uncertainty( uncertainty: Union[ np.ndarray, torch.Tensor, List[np.ndarray], List[torch.Tensor], ], saturation_percentile=95, ) -> List[PIL.Image.Image]: """ Visualizes dense uncertainties, such as produced by `MarigoldDepthPipeline`, `MarigoldNormalsPipeline`, or `MarigoldIntrinsicsPipeline`. Args: uncertainty (`Union[np.ndarray, torch.Tensor, List[np.ndarray], List[torch.Tensor]]`): Uncertainty maps. saturation_percentile (`int`, *optional*, defaults to `95`): Specifies the percentile uncertainty value visualized with maximum intensity. 
Returns: `List[PIL.Image.Image]` with uncertainty visualization. """ def visualize_uncertainty_one(img, idx=None): prefix = "Uncertainty" + (f"[{idx}]" if idx else "") if img.min() < 0: raise ValueError(f"{prefix}: unexpected data range, min={img.min()}.") img = img.permute(1, 2, 0) # [H,W,C] img = img.squeeze(2).cpu().numpy() # [H,W] or [H,W,3] saturation_value = np.percentile(img, saturation_percentile) img = np.clip(img * 255 / saturation_value, 0, 255) img = img.astype(np.uint8) img = PIL.Image.fromarray(img) return img if uncertainty is None or isinstance(uncertainty, list) and any(o is None for o in uncertainty): raise ValueError("Input uncertainty is `None`") if isinstance(uncertainty, (np.ndarray, torch.Tensor)): uncertainty = MarigoldImageProcessor.expand_tensor_or_array(uncertainty) if isinstance(uncertainty, np.ndarray): uncertainty = MarigoldImageProcessor.numpy_to_pt(uncertainty) # [N,C,H,W] if not (uncertainty.ndim == 4 and uncertainty.shape[1] in (1, 3)): raise ValueError(f"Unexpected input shape={uncertainty.shape}, expecting [N,C,H,W] with C in (1,3).") return [visualize_uncertainty_one(img, idx) for idx, img in enumerate(uncertainty)] elif isinstance(uncertainty, list): return [visualize_uncertainty_one(img, idx) for idx, img in enumerate(uncertainty)] else: raise ValueError(f"Unexpected input type: {type(uncertainty)}")
diffusers/src/diffusers/pipelines/marigold/marigold_image_processing.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/marigold/marigold_image_processing.py", "repo_id": "diffusers", "token_count": 14872 }
180
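Because `MarigoldImageProcessor` exposes its helpers as static methods, the colorization utilities in the file above can be exercised without loading a Marigold checkpoint. A small sketch, assuming only that the module is importable from the path recorded above; the random depth map stands in for a real prediction from `MarigoldDepthPipeline`.

```py
import torch
from diffusers.pipelines.marigold.marigold_image_processing import MarigoldImageProcessor

# A synthetic [H, W] depth map in [0, 1]; a real one would come from MarigoldDepthPipeline.
depth = torch.rand(240, 320)

# colormap() applies the built-in "Spectral" palette without requiring matplotlib
# and returns an [H, W, 3] uint8 tensor when bytes=True.
colored = MarigoldImageProcessor.colormap(depth, cmap="Spectral", bytes=True)

# visualize_depth() wraps the same logic and returns a list of PIL images, one per map.
vis = MarigoldImageProcessor.visualize_depth(depth, val_min=0.0, val_max=1.0)
vis[0].save("depth_visualization.png")
```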
from dataclasses import dataclass from typing import TYPE_CHECKING, List, Optional, Union import numpy as np import PIL from PIL import Image from ...utils import ( DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, get_objects_from_module, is_torch_available, is_transformers_available, ) _dummy_objects = {} _import_structure = {} try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils import dummy_torch_and_transformers_objects # noqa F403 _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: _import_structure["image_encoder"] = ["PaintByExampleImageEncoder"] _import_structure["pipeline_paint_by_example"] = ["PaintByExamplePipeline"] if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * else: from .image_encoder import PaintByExampleImageEncoder from .pipeline_paint_by_example import PaintByExamplePipeline else: import sys sys.modules[__name__] = _LazyModule( __name__, globals()["__file__"], _import_structure, module_spec=__spec__, ) for name, value in _dummy_objects.items(): setattr(sys.modules[__name__], name, value)
diffusers/src/diffusers/pipelines/paint_by_example/__init__.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/paint_by_example/__init__.py", "repo_id": "diffusers", "token_count": 599 }
181
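The `__init__.py` above wires the package through `_LazyModule`, so importing the subpackage stays cheap and the torch/transformers-backed modules are only loaded on first attribute access. A short sketch of that behaviour, assuming torch and transformers are installed:

```py
import diffusers.pipelines.paint_by_example as paint_by_example

# At this point only the lazy module shell exists; pipeline_paint_by_example
# has not been imported yet.

# Attribute access resolves the name from _import_structure and triggers the real
# import (or surfaces the dummy object when the optional dependencies are missing).
PaintByExamplePipeline = paint_by_example.PaintByExamplePipeline
print(PaintByExamplePipeline.__name__)  # "PaintByExamplePipeline"
```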
# Copyright 2025 The GLIGEN Authors and HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from torch import nn from ...configuration_utils import ConfigMixin, register_to_config from ...models.modeling_utils import ModelMixin class CLIPImageProjection(ModelMixin, ConfigMixin): @register_to_config def __init__(self, hidden_size: int = 768): super().__init__() self.hidden_size = hidden_size self.project = nn.Linear(self.hidden_size, self.hidden_size, bias=False) def forward(self, x): return self.project(x)
diffusers/src/diffusers/pipelines/stable_diffusion/clip_image_project_model.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/stable_diffusion/clip_image_project_model.py", "repo_id": "diffusers", "token_count": 336 }
182
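`CLIPImageProjection` above is just a bias-free linear layer registered as a diffusers model, so it can be tried in isolation. A minimal sketch with hypothetical random embeddings standing in for CLIP image features:

```py
import torch
from diffusers.pipelines.stable_diffusion.clip_image_project_model import CLIPImageProjection

proj = CLIPImageProjection(hidden_size=768)  # default hidden size from the config above

# Hypothetical batch of CLIP image embeddings; real ones would come from a CLIP vision encoder.
image_embeds = torch.randn(2, 768)
projected = proj(image_embeds)
assert projected.shape == (2, 768)  # the projection preserves the hidden size
```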
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import warnings from typing import Callable, List, Optional, Union import numpy as np import PIL.Image import torch import torch.nn.functional as F from transformers import CLIPTextModel, CLIPTokenizer from ...image_processor import PipelineImageInput, VaeImageProcessor from ...loaders import FromSingleFileMixin from ...models import AutoencoderKL, UNet2DConditionModel from ...schedulers import EulerDiscreteScheduler from ...utils import deprecate, is_torch_xla_available, logging from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput, StableDiffusionMixin if is_torch_xla_available(): import torch_xla.core.xla_model as xm XLA_AVAILABLE = True else: XLA_AVAILABLE = False logger = logging.get_logger(__name__) # pylint: disable=invalid-name # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents def retrieve_latents( encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample" ): if hasattr(encoder_output, "latent_dist") and sample_mode == "sample": return encoder_output.latent_dist.sample(generator) elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax": return encoder_output.latent_dist.mode() elif hasattr(encoder_output, "latents"): return encoder_output.latents else: raise AttributeError("Could not access latents of provided encoder_output") # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.preprocess def preprocess(image): warnings.warn( "The preprocess method is deprecated and will be removed in a future version. Please" " use VaeImageProcessor.preprocess instead", FutureWarning, ) if isinstance(image, torch.Tensor): return image elif isinstance(image, PIL.Image.Image): image = [image] if isinstance(image[0], PIL.Image.Image): w, h = image[0].size w, h = (x - x % 64 for x in (w, h)) # resize to integer multiple of 64 image = [np.array(i.resize((w, h)))[None, :] for i in image] image = np.concatenate(image, axis=0) image = np.array(image).astype(np.float32) / 255.0 image = image.transpose(0, 3, 1, 2) image = 2.0 * image - 1.0 image = torch.from_numpy(image) elif isinstance(image[0], torch.Tensor): image = torch.cat(image, dim=0) return image class StableDiffusionLatentUpscalePipeline(DiffusionPipeline, StableDiffusionMixin, FromSingleFileMixin): r""" Pipeline for upscaling Stable Diffusion output image resolution by a factor of 2. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.). The pipeline also inherits the following loading methods: - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files Args: vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. 
text_encoder ([`~transformers.CLIPTextModel`]): Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). tokenizer ([`~transformers.CLIPTokenizer`]): A `CLIPTokenizer` to tokenize text. unet ([`UNet2DConditionModel`]): A `UNet2DConditionModel` to denoise the encoded image latents. scheduler ([`SchedulerMixin`]): A [`EulerDiscreteScheduler`] to be used in combination with `unet` to denoise the encoded image latents. """ model_cpu_offload_seq = "text_encoder->unet->vae" def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: EulerDiscreteScheduler, ): super().__init__() self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, ) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8 self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, resample="bicubic") def _encode_prompt( self, prompt, device, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, pooled_prompt_embeds: Optional[torch.Tensor] = None, negative_pooled_prompt_embeds: Optional[torch.Tensor] = None, **kwargs, ): deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple." deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False) ( prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds, ) = self.encode_prompt( prompt=prompt, device=device, do_classifier_free_guidance=do_classifier_free_guidance, negative_prompt=negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, **kwargs, ) prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) pooled_prompt_embeds = torch.cat([negative_pooled_prompt_embeds, pooled_prompt_embeds]) return prompt_embeds, pooled_prompt_embeds def encode_prompt( self, prompt, device, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, pooled_prompt_embeds: Optional[torch.Tensor] = None, negative_pooled_prompt_embeds: Optional[torch.Tensor] = None, ): r""" Encodes the prompt into text encoder hidden states. Args: prompt (`str` or `list(int)`): prompt to be encoded device: (`torch.device`): torch device do_classifier_free_guidance (`bool`): whether to use classifier free guidance or not negative_prompt (`str` or `List[str]`): The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. 
pooled_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, pooled text embeddings will be generated from `prompt` input argument. negative_pooled_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` input argument. """ if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None or pooled_prompt_embeds is None: text_inputs = self.tokenizer( prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_length=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( text_input_ids, untruncated_ids ): removed_text = self.tokenizer.batch_decode( untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] ) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {self.tokenizer.model_max_length} tokens: {removed_text}" ) text_encoder_out = self.text_encoder( text_input_ids.to(device), output_hidden_states=True, ) prompt_embeds = text_encoder_out.hidden_states[-1] pooled_prompt_embeds = text_encoder_out.pooler_output # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: if negative_prompt_embeds is None or negative_pooled_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [""] * batch_size elif type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." ) elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) else: uncond_tokens = negative_prompt max_length = text_input_ids.shape[-1] uncond_input = self.tokenizer( uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_length=True, return_tensors="pt", ) uncond_encoder_out = self.text_encoder( uncond_input.input_ids.to(device), output_hidden_states=True, ) negative_prompt_embeds = uncond_encoder_out.hidden_states[-1] negative_pooled_prompt_embeds = uncond_encoder_out.pooler_output return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents def decode_latents(self, latents): deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) 
instead" deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False) latents = 1 / self.vae.config.scaling_factor * latents image = self.vae.decode(latents, return_dict=False)[0] image = (image / 2 + 0.5).clamp(0, 1) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 image = image.cpu().permute(0, 2, 3, 1).float().numpy() return image def check_inputs( self, prompt, image, callback_steps, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None, pooled_prompt_embeds=None, negative_pooled_prompt_embeds=None, ): if prompt is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt is None and prompt_embeds is None: raise ValueError( "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." ) elif prompt is not None and not isinstance(prompt, str) and not isinstance(prompt, list): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" f" {negative_prompt_embeds}. Please make sure to only forward one of the two." ) if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError( "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" f" {negative_prompt_embeds.shape}." ) if prompt_embeds is not None and pooled_prompt_embeds is None: raise ValueError( "If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`." ) if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None: raise ValueError( "If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`." ) if ( not isinstance(image, torch.Tensor) and not isinstance(image, np.ndarray) and not isinstance(image, PIL.Image.Image) and not isinstance(image, list) ): raise ValueError( f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or `list` but is {type(image)}" ) # verify batch size of prompt and image are same if image is a list or tensor if isinstance(image, (list, torch.Tensor)): if prompt is not None: if isinstance(prompt, str): batch_size = 1 else: batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if isinstance(image, list): image_batch_size = len(image) else: image_batch_size = image.shape[0] if image.ndim == 4 else 1 if batch_size != image_batch_size: raise ValueError( f"`prompt` has batch size {batch_size} and `image` has batch size {image_batch_size}." " Please make sure that passed `prompt` matches the batch size of `image`." ) if (callback_steps is None) or ( callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) ): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." 
) # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.prepare_latents def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): shape = (batch_size, num_channels_latents, height, width) if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: if latents.shape != shape: raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") latents = latents.to(device) # scale the initial noise by the standard deviation required by the scheduler latents = latents * self.scheduler.init_noise_sigma return latents @torch.no_grad() def __call__( self, prompt: Union[str, List[str]] = None, image: PipelineImageInput = None, num_inference_steps: int = 75, guidance_scale: float = 9.0, negative_prompt: Optional[Union[str, List[str]]] = None, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.Tensor] = None, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, pooled_prompt_embeds: Optional[torch.Tensor] = None, negative_pooled_prompt_embeds: Optional[torch.Tensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, callback_steps: int = 1, ): r""" The call function to the pipeline for generation. Args: prompt (`str` or `List[str]`): The prompt or prompts to guide image upscaling. image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`): `Image` or tensor representing an image batch to be upscaled. If it's a tensor, it can be either a latent output from a Stable Diffusion model or an image tensor in the range `[-1, 1]`. It is considered a `latent` if `image.shape[1]` is `4`; otherwise, it is considered to be an image representation and encoded using this pipeline's `vae` encoder. num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. guidance_scale (`float`, *optional*, defaults to 7.5): A higher guidance scale value encourages the model to generate images closely linked to the text `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide what to not include in image generation. If not defined, you need to pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). eta (`float`, *optional*, defaults to 0.0): Corresponds to parameter eta (η) from the [DDIM](https://huggingface.co/papers/2010.02502) paper. Only applies to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.Tensor`, *optional*): Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor is generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generated image. 
Choose between `PIL.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a plain tuple. callback (`Callable`, *optional*): A function that calls every `callback_steps` steps during inference. The function is called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. callback_steps (`int`, *optional*, defaults to 1): The frequency at which the `callback` function is called. If not specified, the callback is called at every step. Examples: ```py >>> from diffusers import StableDiffusionLatentUpscalePipeline, StableDiffusionPipeline >>> import torch >>> pipeline = StableDiffusionPipeline.from_pretrained( ... "CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16 ... ) >>> pipeline.to("cuda") >>> model_id = "stabilityai/sd-x2-latent-upscaler" >>> upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(model_id, torch_dtype=torch.float16) >>> upscaler.to("cuda") >>> prompt = "a photo of an astronaut high resolution, unreal engine, ultra realistic" >>> generator = torch.manual_seed(33) >>> low_res_latents = pipeline(prompt, generator=generator, output_type="latent").images >>> with torch.no_grad(): ... image = pipeline.decode_latents(low_res_latents) >>> image = pipeline.numpy_to_pil(image)[0] >>> image.save("../images/a1.png") >>> upscaled_image = upscaler( ... prompt=prompt, ... image=low_res_latents, ... num_inference_steps=20, ... guidance_scale=0, ... generator=generator, ... ).images[0] >>> upscaled_image.save("../images/a2.png") ``` Returns: [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, otherwise a `tuple` is returned where the first element is a list with the generated images. """ # 1. Check inputs self.check_inputs( prompt, image, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds, ) # 2. Define call parameters if prompt is not None: batch_size = 1 if isinstance(prompt, str) else len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://huggingface.co/papers/2205.11487 . `guidance_scale = 1` # corresponds to doing no classifier free guidance. do_classifier_free_guidance = guidance_scale > 1.0 if guidance_scale == 0: prompt = [""] * batch_size # 3. Encode input prompt ( prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds, ) = self.encode_prompt( prompt, device, do_classifier_free_guidance, negative_prompt, prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds, ) if do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) pooled_prompt_embeds = torch.cat([negative_pooled_prompt_embeds, pooled_prompt_embeds]) # 4. Preprocess image image = self.image_processor.preprocess(image) image = image.to(dtype=prompt_embeds.dtype, device=device) if image.shape[1] == 3: # encode image if not in latent-space yet image = retrieve_latents(self.vae.encode(image), generator=generator) * self.vae.config.scaling_factor # 5. 
set timesteps self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps batch_multiplier = 2 if do_classifier_free_guidance else 1 image = image[None, :] if image.ndim == 3 else image image = torch.cat([image] * batch_multiplier) # 5. Add noise to image (set to be 0): # (see below notes from the author): # "the This step theoretically can make the model work better on out-of-distribution inputs, but mostly just seems to make it match the input less, so it's turned off by default." noise_level = torch.tensor([0.0], dtype=torch.float32, device=device) noise_level = torch.cat([noise_level] * image.shape[0]) inv_noise_level = (noise_level**2 + 1) ** (-0.5) image_cond = F.interpolate(image, scale_factor=2, mode="nearest") * inv_noise_level[:, None, None, None] image_cond = image_cond.to(prompt_embeds.dtype) noise_level_embed = torch.cat( [ torch.ones(pooled_prompt_embeds.shape[0], 64, dtype=pooled_prompt_embeds.dtype, device=device), torch.zeros(pooled_prompt_embeds.shape[0], 64, dtype=pooled_prompt_embeds.dtype, device=device), ], dim=1, ) timestep_condition = torch.cat([noise_level_embed, pooled_prompt_embeds], dim=1) # 6. Prepare latent variables height, width = image.shape[2:] num_channels_latents = self.vae.config.latent_channels latents = self.prepare_latents( batch_size, num_channels_latents, height * 2, # 2x upscale width * 2, prompt_embeds.dtype, device, generator, latents, ) # 7. Check that sizes of image and latents match num_channels_image = image.shape[1] if num_channels_latents + num_channels_image != self.unet.config.in_channels: raise ValueError( f"Incorrect configuration settings! The config of `pipeline.unet`: {self.unet.config} expects" f" {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} +" f" `num_channels_image`: {num_channels_image} " f" = {num_channels_latents + num_channels_image}. Please verify the config of" " `pipeline.unet` or your `image` input." ) # 9. Denoising loop num_warmup_steps = 0 with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): sigma = self.scheduler.sigmas[i] # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents scaled_model_input = self.scheduler.scale_model_input(latent_model_input, t) scaled_model_input = torch.cat([scaled_model_input, image_cond], dim=1) # preconditioning parameter based on Karras et al. (2022) (table 1) timestep = torch.log(sigma) * 0.25 noise_pred = self.unet( scaled_model_input, timestep, encoder_hidden_states=prompt_embeds, timestep_cond=timestep_condition, ).sample # in original repo, the output contains a variance channel that's not used noise_pred = noise_pred[:, :-1] # apply preconditioning, based on table 1 in Karras et al. 
(2022) inv_sigma = 1 / (sigma**2 + 1) noise_pred = inv_sigma * latent_model_input + self.scheduler.scale_model_input(sigma, t) * noise_pred # perform guidance if do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred, t, latents).prev_sample # call the callback, if provided if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, "order", 1) callback(step_idx, t, latents) if XLA_AVAILABLE: xm.mark_step() if not output_type == "latent": image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] else: image = latents image = self.image_processor.postprocess(image, output_type=output_type) self.maybe_free_model_hooks() if not return_dict: return (image,) return ImagePipelineOutput(images=image)
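# --- Illustrative sketch, not part of the pipeline above ---
# The denoising loop applies an EDM-style preconditioning (Karras et al. 2022, Table 1, with
# sigma_data = 1): the UNet sees the latents scaled by c_in = 1 / sqrt(sigma^2 + 1) together with a
# log-sigma timestep, and its output is recombined as c_skip * x + c_out * F(x). The standalone
# restatement below is a sketch under that reading; `unet_like` is a hypothetical stand-in callable.


def karras_preconditioned_denoise(latents, sigma, unet_like):
    import torch

    sigma = torch.as_tensor(sigma, dtype=latents.dtype, device=latents.device)

    c_in = 1 / (sigma**2 + 1) ** 0.5  # what scheduler.scale_model_input applies to the input
    c_skip = 1 / (sigma**2 + 1)  # `inv_sigma` in the loop above
    c_out = sigma / (sigma**2 + 1) ** 0.5

    timestep = torch.log(sigma) * 0.25  # same noise-level conditioning as in the loop above
    model_out = unet_like(latents * c_in, timestep)
    return c_skip * latents + c_out * model_out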
diffusers/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_latent_upscale.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_latent_upscale.py", "repo_id": "diffusers", "token_count": 13951 }
183
from typing import TYPE_CHECKING

from ...utils import (
    DIFFUSERS_SLOW_IMPORT,
    OptionalDependencyNotAvailable,
    _LazyModule,
    get_objects_from_module,
    is_flax_available,
    is_torch_available,
    is_transformers_available,
)


_dummy_objects = {}
_additional_imports = {}
_import_structure = {"pipeline_output": ["StableDiffusionXLPipelineOutput"]}

if is_transformers_available() and is_flax_available():
    _import_structure["pipeline_output"].extend(["FlaxStableDiffusionXLPipelineOutput"])

try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils import dummy_torch_and_transformers_objects  # noqa F403

    _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects))
else:
    _import_structure["pipeline_stable_diffusion_xl"] = ["StableDiffusionXLPipeline"]
    _import_structure["pipeline_stable_diffusion_xl_img2img"] = ["StableDiffusionXLImg2ImgPipeline"]
    _import_structure["pipeline_stable_diffusion_xl_inpaint"] = ["StableDiffusionXLInpaintPipeline"]
    _import_structure["pipeline_stable_diffusion_xl_instruct_pix2pix"] = ["StableDiffusionXLInstructPix2PixPipeline"]

if is_transformers_available() and is_flax_available():
    from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState

    _additional_imports.update({"PNDMSchedulerState": PNDMSchedulerState})
    _import_structure["pipeline_flax_stable_diffusion_xl"] = ["FlaxStableDiffusionXLPipeline"]


if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
    try:
        if not (is_transformers_available() and is_torch_available()):
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
    else:
        from .pipeline_stable_diffusion_xl import StableDiffusionXLPipeline
        from .pipeline_stable_diffusion_xl_img2img import StableDiffusionXLImg2ImgPipeline
        from .pipeline_stable_diffusion_xl_inpaint import StableDiffusionXLInpaintPipeline
        from .pipeline_stable_diffusion_xl_instruct_pix2pix import StableDiffusionXLInstructPix2PixPipeline

    try:
        if not (is_transformers_available() and is_flax_available()):
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        from ...utils.dummy_flax_objects import *
    else:
        from .pipeline_flax_stable_diffusion_xl import (
            FlaxStableDiffusionXLPipeline,
        )
        from .pipeline_output import FlaxStableDiffusionXLPipelineOutput

else:
    import sys

    sys.modules[__name__] = _LazyModule(
        __name__,
        globals()["__file__"],
        _import_structure,
        module_spec=__spec__,
    )

    for name, value in _dummy_objects.items():
        setattr(sys.modules[__name__], name, value)
    for name, value in _additional_imports.items():
        setattr(sys.modules[__name__], name, value)
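# --- Illustrative usage sketch, not part of the module above ---
# With the lazy-module setup, the heavy pipeline submodules are only imported when one of these
# names is first accessed; if torch or transformers is missing, the dummy objects registered in
# `_dummy_objects` are exported instead and raise an informative error on use. A minimal entry
# point might look like this (checkpoint id, fp16 variant and CUDA device are assumptions):


def _example_sdxl_text_to_image(prompt: str = "an astronaut riding a horse"):
    import torch

    from diffusers import StableDiffusionXLPipeline

    pipe = StableDiffusionXLPipeline.from_pretrained(
        "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16, variant="fp16"
    )
    pipe.to("cuda")
    return pipe(prompt).images[0]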
diffusers/src/diffusers/pipelines/stable_diffusion_xl/__init__.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/stable_diffusion_xl/__init__.py", "repo_id": "diffusers", "token_count": 1202 }
184
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect from typing import Any, Callable, Dict, List, Optional, Union import numpy as np import torch from transformers import CLIPTextModel, CLIPTokenizer from ...loaders import StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin from ...models import AutoencoderKL, UNet3DConditionModel from ...models.lora import adjust_lora_scale_text_encoder from ...schedulers import KarrasDiffusionSchedulers from ...utils import ( USE_PEFT_BACKEND, deprecate, is_torch_xla_available, logging, replace_example_docstring, scale_lora_layers, unscale_lora_layers, ) from ...utils.torch_utils import randn_tensor from ...video_processor import VideoProcessor from ..pipeline_utils import DeprecatedPipelineMixin, DiffusionPipeline, StableDiffusionMixin from . import TextToVideoSDPipelineOutput if is_torch_xla_available(): import torch_xla.core.xla_model as xm XLA_AVAILABLE = True else: XLA_AVAILABLE = False logger = logging.get_logger(__name__) # pylint: disable=invalid-name EXAMPLE_DOC_STRING = """ Examples: ```py >>> import torch >>> from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler >>> from diffusers.utils import export_to_video >>> pipe = DiffusionPipeline.from_pretrained("cerspense/zeroscope_v2_576w", torch_dtype=torch.float16) >>> pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config) >>> pipe.to("cuda") >>> prompt = "spiderman running in the desert" >>> video_frames = pipe(prompt, num_inference_steps=40, height=320, width=576, num_frames=24).frames[0] >>> # safe low-res video >>> video_path = export_to_video(video_frames, output_video_path="./video_576_spiderman.mp4") >>> # let's offload the text-to-image model >>> pipe.to("cpu") >>> # and load the image-to-image model >>> pipe = DiffusionPipeline.from_pretrained( ... "cerspense/zeroscope_v2_XL", torch_dtype=torch.float16, revision="refs/pr/15" ... 
) >>> pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config) >>> pipe.enable_model_cpu_offload() >>> # The VAE consumes A LOT of memory, let's make sure we run it in sliced mode >>> pipe.vae.enable_slicing() >>> # now let's upscale it >>> video = [Image.fromarray(frame).resize((1024, 576)) for frame in video_frames] >>> # and denoise it >>> video_frames = pipe(prompt, video=video, strength=0.6).frames[0] >>> video_path = export_to_video(video_frames, output_video_path="./video_1024_spiderman.mp4") >>> video_path ``` """ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents def retrieve_latents( encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample" ): if hasattr(encoder_output, "latent_dist") and sample_mode == "sample": return encoder_output.latent_dist.sample(generator) elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax": return encoder_output.latent_dist.mode() elif hasattr(encoder_output, "latents"): return encoder_output.latents else: raise AttributeError("Could not access latents of provided encoder_output") class VideoToVideoSDPipeline( DeprecatedPipelineMixin, DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, StableDiffusionLoraLoaderMixin, ): _last_supported_version = "0.33.1" r""" Pipeline for text-guided video-to-video generation. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.). The pipeline also inherits the following loading methods: - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings - [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`] for loading LoRA weights - [`~loaders.StableDiffusionLoraLoaderMixin.save_lora_weights`] for saving LoRA weights Args: vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) Model to encode and decode videos to and from latent representations. text_encoder ([`CLIPTextModel`]): Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). tokenizer (`CLIPTokenizer`): A [`~transformers.CLIPTokenizer`] to tokenize text. unet ([`UNet3DConditionModel`]): A [`UNet3DConditionModel`] to denoise the encoded video latents. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. 
""" model_cpu_offload_seq = "text_encoder->unet->vae" def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet3DConditionModel, scheduler: KarrasDiffusionSchedulers, ): super().__init__() self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, ) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8 self.video_processor = VideoProcessor(do_resize=False, vae_scale_factor=self.vae_scale_factor) # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt def _encode_prompt( self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, lora_scale: Optional[float] = None, **kwargs, ): deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple." deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False) prompt_embeds_tuple = self.encode_prompt( prompt=prompt, device=device, num_images_per_prompt=num_images_per_prompt, do_classifier_free_guidance=do_classifier_free_guidance, negative_prompt=negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, lora_scale=lora_scale, **kwargs, ) # concatenate for backwards comp prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) return prompt_embeds # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt def encode_prompt( self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, lora_scale: Optional[float] = None, clip_skip: Optional[int] = None, ): r""" Encodes the prompt into text encoder hidden states. Args: prompt (`str` or `List[str]`, *optional*): prompt to be encoded device: (`torch.device`): torch device num_images_per_prompt (`int`): number of images that should be generated per prompt do_classifier_free_guidance (`bool`): whether to use classifier free guidance or not negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. lora_scale (`float`, *optional*): A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. clip_skip (`int`, *optional*): Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that the output of the pre-final layer will be used for computing the prompt embeddings. 
""" # set lora scale so that monkey patched LoRA # function of text encoder can correctly access it if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin): self._lora_scale = lora_scale # dynamically adjust the LoRA scale if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) else: scale_lora_layers(self.text_encoder, lora_scale) if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: # textual inversion: process multi-vector tokens if necessary if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, self.tokenizer) text_inputs = self.tokenizer( prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( text_input_ids, untruncated_ids ): removed_text = self.tokenizer.batch_decode( untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] ) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {self.tokenizer.model_max_length} tokens: {removed_text}" ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None if clip_skip is None: prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) prompt_embeds = prompt_embeds[0] else: prompt_embeds = self.text_encoder( text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True ) # Access the `hidden_states` first, that contains a tuple of # all the hidden states from the encoder layers. Then index into # the tuple to access the hidden states from the desired layer. prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] # We also need to apply the final LayerNorm here to not mess with the # representations. The `last_hidden_states` that we typically use for # obtaining the final prompt representations passes through the LayerNorm # layer. prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) if self.text_encoder is not None: prompt_embeds_dtype = self.text_encoder.dtype elif self.unet is not None: prompt_embeds_dtype = self.unet.dtype else: prompt_embeds_dtype = prompt_embeds.dtype prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) bs_embed, seq_len, _ = prompt_embeds.shape # duplicate text embeddings for each generation per prompt, using mps friendly method prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [""] * batch_size elif prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." 
) elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) else: uncond_tokens = negative_prompt # textual inversion: process multi-vector tokens if necessary if isinstance(self, TextualInversionLoaderMixin): uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) max_length = prompt_embeds.shape[1] uncond_input = self.tokenizer( uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None negative_prompt_embeds = self.text_encoder( uncond_input.input_ids.to(device), attention_mask=attention_mask, ) negative_prompt_embeds = negative_prompt_embeds[0] if do_classifier_free_guidance: # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) if self.text_encoder is not None: if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND: # Retrieve the original scale by scaling back the LoRA layers unscale_lora_layers(self.text_encoder, lora_scale) return prompt_embeds, negative_prompt_embeds # Copied from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_synth.TextToVideoSDPipeline.decode_latents def decode_latents(self, latents): latents = 1 / self.vae.config.scaling_factor * latents batch_size, channels, num_frames, height, width = latents.shape latents = latents.permute(0, 2, 1, 3, 4).reshape(batch_size * num_frames, channels, height, width) image = self.vae.decode(latents).sample video = image[None, :].reshape((batch_size, num_frames, -1) + image.shape[2:]).permute(0, 2, 1, 3, 4) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 video = video.float() return video # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs def prepare_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://huggingface.co/papers/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs def check_inputs( self, prompt, strength, callback_steps, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None, callback_on_step_end_tensor_inputs=None, ): if strength < 0 or strength > 1: raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}") if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." ) if callback_on_step_end_tensor_inputs is not None and not all( k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs ): raise ValueError( f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" ) if prompt is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt is None and prompt_embeds is None: raise ValueError( "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." ) elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" f" {negative_prompt_embeds}. Please make sure to only forward one of the two." ) if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError( "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" f" {negative_prompt_embeds.shape}." 
) # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline.get_timesteps def get_timesteps(self, num_inference_steps, strength, device): # get the original timestep using init_timestep init_timestep = min(int(num_inference_steps * strength), num_inference_steps) t_start = max(num_inference_steps - init_timestep, 0) timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :] if hasattr(self.scheduler, "set_begin_index"): self.scheduler.set_begin_index(t_start * self.scheduler.order) return timesteps, num_inference_steps - t_start def prepare_latents(self, video, timestep, batch_size, dtype, device, generator=None): video = video.to(device=device, dtype=dtype) # change from (b, c, f, h, w) -> (b * f, c, w, h) bsz, channel, frames, width, height = video.shape video = video.permute(0, 2, 1, 3, 4).reshape(bsz * frames, channel, width, height) if video.shape[1] == 4: init_latents = video else: if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." ) elif isinstance(generator, list): init_latents = [ retrieve_latents(self.vae.encode(video[i : i + 1]), generator=generator[i]) for i in range(batch_size) ] init_latents = torch.cat(init_latents, dim=0) else: init_latents = retrieve_latents(self.vae.encode(video), generator=generator) init_latents = self.vae.config.scaling_factor * init_latents if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0: raise ValueError( f"Cannot duplicate `video` of batch size {init_latents.shape[0]} to {batch_size} text prompts." ) else: init_latents = torch.cat([init_latents], dim=0) shape = init_latents.shape noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) # get latents init_latents = self.scheduler.add_noise(init_latents, noise, timestep) latents = init_latents latents = latents[None, :].reshape((bsz, frames, latents.shape[1]) + latents.shape[2:]).permute(0, 2, 1, 3, 4) return latents @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__( self, prompt: Union[str, List[str]] = None, video: Union[List[np.ndarray], torch.Tensor] = None, strength: float = 0.6, num_inference_steps: int = 50, guidance_scale: float = 15.0, negative_prompt: Optional[Union[str, List[str]]] = None, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.Tensor] = None, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, output_type: Optional[str] = "np", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, callback_steps: int = 1, cross_attention_kwargs: Optional[Dict[str, Any]] = None, clip_skip: Optional[int] = None, ): r""" The call function to the pipeline for generation. Args: prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. video (`List[np.ndarray]` or `torch.Tensor`): `video` frames or tensor representing a video batch to be used as the starting point for the process. Can also accept video latents as `image`, if passing latents directly, it will not be encoded again. strength (`float`, *optional*, defaults to 0.8): Indicates extent to transform the reference `video`. 
Must be between 0 and 1. `video` is used as a starting point, adding more noise to it the larger the `strength`. The number of denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise is maximum and the denoising process runs for the full number of iterations specified in `num_inference_steps`. A value of 1 essentially ignores `video`. num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality videos at the expense of slower inference. guidance_scale (`float`, *optional*, defaults to 7.5): A higher guidance scale value encourages the model to generate images closely linked to the text `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide what to not include in video generation. If not defined, you need to pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). eta (`float`, *optional*, defaults to 0.0): Corresponds to parameter eta (η) from the [DDIM](https://huggingface.co/papers/2010.02502) paper. Only applies to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.Tensor`, *optional*): Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for video generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor is generated by sampling using the supplied random `generator`. Latents should be of shape `(batch_size, num_channel, num_frames, height, width)`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not provided, text embeddings are generated from the `prompt` input argument. negative_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. output_type (`str`, *optional*, defaults to `"np"`): The output format of the generated video. Choose between `torch.Tensor` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.text_to_video_synthesis.TextToVideoSDPipelineOutput`] instead of a plain tuple. callback (`Callable`, *optional*): A function that calls every `callback_steps` steps during inference. The function is called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. callback_steps (`int`, *optional*, defaults to 1): The frequency at which the `callback` function is called. If not specified, the callback is called at every step. cross_attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). clip_skip (`int`, *optional*): Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that the output of the pre-final layer will be used for computing the prompt embeddings. 
Examples: Returns: [`~pipelines.text_to_video_synthesis.TextToVideoSDPipelineOutput`] or `tuple`: If `return_dict` is `True`, [`~pipelines.text_to_video_synthesis.TextToVideoSDPipelineOutput`] is returned, otherwise a `tuple` is returned where the first element is a list with the generated frames. """ # 0. Default height and width to unet num_images_per_prompt = 1 # 1. Check inputs. Raise error if not correct self.check_inputs(prompt, strength, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds) # 2. Define call parameters if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://huggingface.co/papers/2205.11487 . `guidance_scale = 1` # corresponds to doing no classifier free guidance. do_classifier_free_guidance = guidance_scale > 1.0 # 3. Encode input prompt text_encoder_lora_scale = ( cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None ) prompt_embeds, negative_prompt_embeds = self.encode_prompt( prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, lora_scale=text_encoder_lora_scale, clip_skip=clip_skip, ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes if do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) # 4. Preprocess video video = self.video_processor.preprocess_video(video) # 5. Prepare timesteps self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device) latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) # 6. Prepare latent variables latents = self.prepare_latents(video, latent_timestep, batch_size, prompt_embeds.dtype, device, generator) # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) # 8. 
Denoising loop num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) # predict the noise residual noise_pred = self.unet( latent_model_input, t, encoder_hidden_states=prompt_embeds, cross_attention_kwargs=cross_attention_kwargs, return_dict=False, )[0] # perform guidance if do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # reshape latents bsz, channel, frames, width, height = latents.shape latents = latents.permute(0, 2, 1, 3, 4).reshape(bsz * frames, channel, width, height) noise_pred = noise_pred.permute(0, 2, 1, 3, 4).reshape(bsz * frames, channel, width, height) # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample # reshape latents back latents = latents[None, :].reshape(bsz, frames, channel, width, height).permute(0, 2, 1, 3, 4) # call the callback, if provided if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, "order", 1) callback(step_idx, t, latents) if XLA_AVAILABLE: xm.mark_step() # manually for max memory savings if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None: self.unet.to("cpu") # 9. Post processing if output_type == "latent": video = latents else: video_tensor = self.decode_latents(latents) video = self.video_processor.postprocess_video(video=video_tensor, output_type=output_type) # 10. Offload all models self.maybe_free_model_hooks() if not return_dict: return (video,) return TextToVideoSDPipelineOutput(frames=video)
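# --- Illustrative sketch, not part of the pipeline above ---
# The UNet3D consumes 5D latents (batch, channels, frames, height, width), while the scheduler
# step and the VAE operate frame-wise on 4D tensors, so the loop above repeatedly folds the
# frame axis into the batch axis and back. A minimal round-trip of that reshape with toy sizes
# (the sizes are arbitrary assumptions):


def _frame_batch_round_trip():
    import torch

    bsz, channel, frames, height, width = 1, 4, 8, 32, 32
    latents = torch.randn(bsz, channel, frames, height, width)

    # (b, c, f, h, w) -> (b * f, c, h, w) for per-frame scheduler/VAE ops
    flat = latents.permute(0, 2, 1, 3, 4).reshape(bsz * frames, channel, height, width)

    # ... per-frame operations would happen here ...

    # (b * f, c, h, w) -> (b, c, f, h, w)
    restored = flat.reshape(bsz, frames, channel, height, width).permute(0, 2, 1, 3, 4)
    assert torch.equal(latents, restored)
    return restored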
diffusers/src/diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth_img2img.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth_img2img.py", "repo_id": "diffusers", "token_count": 15431 }
185
from typing import TYPE_CHECKING

from ...utils import (
    DIFFUSERS_SLOW_IMPORT,
    OptionalDependencyNotAvailable,
    _LazyModule,
    get_objects_from_module,
    is_torch_available,
    is_transformers_available,
)


_dummy_objects = {}
_import_structure = {}

try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils import dummy_torch_and_transformers_objects  # noqa F403

    _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects))
else:
    _import_structure["pipeline_wan"] = ["WanPipeline"]
    _import_structure["pipeline_wan_i2v"] = ["WanImageToVideoPipeline"]
    _import_structure["pipeline_wan_vace"] = ["WanVACEPipeline"]
    _import_structure["pipeline_wan_video2video"] = ["WanVideoToVideoPipeline"]

if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
    try:
        if not (is_transformers_available() and is_torch_available()):
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        from ...utils.dummy_torch_and_transformers_objects import *
    else:
        from .pipeline_wan import WanPipeline
        from .pipeline_wan_i2v import WanImageToVideoPipeline
        from .pipeline_wan_vace import WanVACEPipeline
        from .pipeline_wan_video2video import WanVideoToVideoPipeline

else:
    import sys

    sys.modules[__name__] = _LazyModule(
        __name__,
        globals()["__file__"],
        _import_structure,
        module_spec=__spec__,
    )

    for name, value in _dummy_objects.items():
        setattr(sys.modules[__name__], name, value)
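# --- Illustrative usage sketch, not part of the module above ---
# Same optional-dependency pattern as the other pipeline subpackages. With torch and transformers
# installed, a minimal text-to-video entry point might look like this (the checkpoint id, frame
# count and dtype are assumptions, adjust to the Wan checkpoint you actually use):


def _example_wan_text_to_video(prompt: str = "a cat surfing a small wave"):
    import torch

    from diffusers import WanPipeline
    from diffusers.utils import export_to_video

    pipe = WanPipeline.from_pretrained("Wan-AI/Wan2.1-T2V-1.3B-Diffusers", torch_dtype=torch.bfloat16)
    pipe.to("cuda")
    frames = pipe(prompt=prompt, num_frames=33).frames[0]
    return export_to_video(frames, "wan_example.mp4", fps=16)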
diffusers/src/diffusers/pipelines/wan/__init__.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/wan/__init__.py", "repo_id": "diffusers", "token_count": 672 }
186