Columns: text (string, lengths 7 to 1.24M) · id (string, lengths 14 to 166) · metadata (dict) · __index_level_0__ (int64, 0 to 519)
<script lang="ts"> import type { readAndCompressImage } from "browser-image-resizer"; import type { Model } from "$lib/types/Model"; import type { Assistant } from "$lib/types/Assistant"; import { onMount } from "svelte"; import { applyAction, enhance } from "$app/forms"; import { page } from "$app/stores"; import { base } from "$app/paths"; import CarbonPen from "~icons/carbon/pen"; import CarbonUpload from "~icons/carbon/upload"; import CarbonHelpFilled from "~icons/carbon/help"; import CarbonSettingsAdjust from "~icons/carbon/settings-adjust"; import CarbonTools from "~icons/carbon/tools"; import { useSettingsStore } from "$lib/stores/settings"; import { isHuggingChat } from "$lib/utils/isHuggingChat"; import IconInternet from "./icons/IconInternet.svelte"; import TokensCounter from "./TokensCounter.svelte"; import HoverTooltip from "./HoverTooltip.svelte"; import { findCurrentModel } from "$lib/utils/models"; import AssistantToolPicker from "./AssistantToolPicker.svelte"; type ActionData = { error: boolean; errors: { field: string | number; message: string; }[]; } | null; type AssistantFront = Omit<Assistant, "_id" | "createdById"> & { _id: string }; export let form: ActionData; export let assistant: AssistantFront | undefined = undefined; export let models: Model[] = []; let files: FileList | null = null; const settings = useSettingsStore(); let modelId = ""; let systemPrompt = assistant?.preprompt ?? ""; let dynamicPrompt = assistant?.dynamicPrompt ?? false; let showModelSettings = Object.values(assistant?.generateSettings ?? {}).some((v) => !!v); let compress: typeof readAndCompressImage | null = null; onMount(async () => { const module = await import("browser-image-resizer"); compress = module.readAndCompressImage; modelId = findCurrentModel(models, assistant ? assistant.modelId : $settings.activeModel).id; }); let inputMessage1 = assistant?.exampleInputs[0] ?? ""; let inputMessage2 = assistant?.exampleInputs[1] ?? ""; let inputMessage3 = assistant?.exampleInputs[2] ?? ""; let inputMessage4 = assistant?.exampleInputs[3] ?? ""; function resetErrors() { if (form) { form.errors = []; form.error = false; } } function onFilesChange(e: Event) { const inputEl = e.target as HTMLInputElement; if (inputEl.files?.length && inputEl.files[0].size > 0) { if (!inputEl.files[0].type.includes("image")) { inputEl.files = null; files = null; form = { error: true, errors: [{ field: "avatar", message: "Only images are allowed" }] }; return; } files = inputEl.files; resetErrors(); deleteExistingAvatar = false; } } function getError(field: string, returnForm: ActionData) { return returnForm?.errors.find((error) => error.field === field)?.message ?? ""; } let deleteExistingAvatar = false; let loading = false; let ragMode: false | "links" | "domains" | "all" = assistant?.rag?.allowAllDomains ? "all" : assistant?.rag?.allowedLinks?.length ?? 0 > 0 ? "links" : (assistant?.rag?.allowedDomains?.length ?? 0) > 0 ? "domains" : false; let tools = assistant?.tools ?? 
[]; const regex = /{{\s?url=(.+?)\s?}}/g; $: templateVariables = [...systemPrompt.matchAll(regex)].map((match) => match[1]); $: selectedModel = models.find((m) => m.id === modelId); </script> <form method="POST" class="relative flex h-full flex-col overflow-y-auto p-4 md:p-8" enctype="multipart/form-data" use:enhance={async ({ formData }) => { loading = true; if (files?.[0] && files[0].size > 0 && compress) { await compress(files[0], { maxWidth: 500, maxHeight: 500, quality: 1, }).then((resizedImage) => { formData.set("avatar", resizedImage); }); } if (deleteExistingAvatar === true) { if (assistant?.avatar) { // if there is an avatar we explicitly removei t formData.set("avatar", "null"); } else { // else we just remove it from the input formData.delete("avatar"); } } else { if (files === null) { formData.delete("avatar"); } } formData.delete("ragMode"); if (ragMode === false || !$page.data.enableAssistantsRAG) { formData.set("ragAllowAll", "false"); formData.set("ragLinkList", ""); formData.set("ragDomainList", ""); } else if (ragMode === "all") { formData.set("ragAllowAll", "true"); formData.set("ragLinkList", ""); formData.set("ragDomainList", ""); } else if (ragMode === "links") { formData.set("ragAllowAll", "false"); formData.set("ragDomainList", ""); } else if (ragMode === "domains") { formData.set("ragAllowAll", "false"); formData.set("ragLinkList", ""); } formData.set("tools", tools.join(",")); return async ({ result }) => { loading = false; await applyAction(result); }; }} > {#if assistant} <h2 class="text-xl font-semibold"> Edit Assistant: {assistant?.name ?? "assistant"} </h2> <p class="mb-6 text-sm text-gray-500"> Modifying an existing assistant will propagate the changes to all users. </p> {:else} <h2 class="text-xl font-semibold">Create new assistant</h2> <p class="mb-6 text-sm text-gray-500"> Create and share your own AI Assistant. 
All assistants are <span class="rounded-full border px-2 py-0.5 leading-none">public</span > </p> {/if} <div class="grid h-full w-full flex-1 grid-cols-2 gap-6 text-sm max-sm:grid-cols-1"> <div class="col-span-1 flex flex-col gap-4"> <div> <div class="mb-1 block pb-2 text-sm font-semibold">Avatar</div> <input type="file" accept="image/*" name="avatar" id="avatar" class="hidden" on:change={onFilesChange} /> {#if (files && files[0]) || (assistant?.avatar && !deleteExistingAvatar)} <div class="group relative mx-auto h-12 w-12"> {#if files && files[0]} <img src={URL.createObjectURL(files[0])} alt="avatar" class="crop mx-auto h-12 w-12 cursor-pointer rounded-full object-cover" /> {:else if assistant?.avatar} <img src="{base}/settings/assistants/{assistant._id}/avatar.jpg?hash={assistant.avatar}" alt="avatar" class="crop mx-auto h-12 w-12 cursor-pointer rounded-full object-cover" /> {/if} <label for="avatar" class="invisible absolute bottom-0 h-12 w-12 rounded-full bg-black bg-opacity-50 p-1 group-hover:visible hover:visible" > <CarbonPen class="mx-auto my-auto h-full cursor-pointer text-center text-white" /> </label> </div> <div class="mx-auto w-max pt-1"> <button type="button" on:click|stopPropagation|preventDefault={() => { files = null; deleteExistingAvatar = true; }} class="mx-auto w-max text-center text-xs text-gray-600 hover:underline" > Delete </button> </div> {:else} <div class="mb-1 flex w-max flex-row gap-4"> <label for="avatar" class="btn flex h-8 rounded-lg border bg-white px-3 py-1 text-gray-500 shadow-sm transition-all hover:bg-gray-100" > <CarbonUpload class="mr-2 text-xs " /> Upload </label> </div> {/if} <p class="text-xs text-red-500">{getError("avatar", form)}</p> </div> <label> <div class="mb-1 font-semibold">Name</div> <input name="name" class="w-full rounded-lg border-2 border-gray-200 bg-gray-100 p-2" placeholder="Assistant Name" value={assistant?.name ?? ""} /> <p class="text-xs text-red-500">{getError("name", form)}</p> </label> <label> <div class="mb-1 font-semibold">Description</div> <textarea name="description" class="h-15 w-full rounded-lg border-2 border-gray-200 bg-gray-100 p-2" placeholder="It knows everything about python" value={assistant?.description ?? ""} /> <p class="text-xs text-red-500">{getError("description", form)}</p> </label> <label> <div class="mb-1 font-semibold">Model</div> <div class="flex gap-2"> <select name="modelId" class="w-full rounded-lg border-2 border-gray-200 bg-gray-100 p-2" bind:value={modelId} > {#each models.filter((model) => !model.unlisted) as model} <option value={model.id}>{model.displayName}</option> {/each} <p class="text-xs text-red-500">{getError("modelId", form)}</p> </select> <button type="button" class="flex aspect-square items-center gap-2 whitespace-nowrap rounded-lg border px-3 {showModelSettings ? 'border-blue-500/20 bg-blue-50 text-blue-600' : ''}" on:click={() => (showModelSettings = !showModelSettings)} ><CarbonSettingsAdjust class="text-xs" /></button > </div> <div class="mt-2 rounded-lg border border-blue-500/20 bg-blue-500/5 px-2 py-0.5" class:hidden={!showModelSettings} > <p class="text-xs text-red-500">{getError("inputMessage1", form)}</p> <div class="my-2 grid grid-cols-1 gap-2.5 sm:grid-cols-2 sm:grid-rows-2"> <label for="temperature" class="flex justify-between"> <span class="m-1 ml-0 flex items-center gap-1.5 whitespace-nowrap text-sm"> Temperature <HoverTooltip label="Temperature: Controls creativity, higher values allow more variety." 
> <CarbonHelpFilled class="inline text-xxs text-gray-500 group-hover/tooltip:text-blue-600" /> </HoverTooltip> </span> <input type="number" name="temperature" min="0.1" max="2" step="0.1" class="w-20 rounded-lg border-2 border-gray-200 bg-gray-100 px-2 py-1" placeholder={selectedModel?.parameters?.temperature?.toString() ?? "1"} value={assistant?.generateSettings?.temperature ?? ""} /> </label> <label for="top_p" class="flex justify-between"> <span class="m-1 ml-0 flex items-center gap-1.5 whitespace-nowrap text-sm"> Top P <HoverTooltip label="Top P: Sets word choice boundaries, lower values tighten focus." > <CarbonHelpFilled class="inline text-xxs text-gray-500 group-hover/tooltip:text-blue-600" /> </HoverTooltip> </span> <input type="number" name="top_p" class="w-20 rounded-lg border-2 border-gray-200 bg-gray-100 px-2 py-1" min="0.05" max="1" step="0.05" placeholder={selectedModel?.parameters?.top_p?.toString() ?? "1"} value={assistant?.generateSettings?.top_p ?? ""} /> </label> <label for="repetition_penalty" class="flex justify-between"> <span class="m-1 ml-0 flex items-center gap-1.5 whitespace-nowrap text-sm"> Repetition penalty <HoverTooltip label="Repetition penalty: Prevents reuse, higher values decrease repetition." > <CarbonHelpFilled class="inline text-xxs text-gray-500 group-hover/tooltip:text-blue-600" /> </HoverTooltip> </span> <input type="number" name="repetition_penalty" min="0.1" max="2" step="0.1" class="w-20 rounded-lg border-2 border-gray-200 bg-gray-100 px-2 py-1" placeholder={selectedModel?.parameters?.repetition_penalty?.toString() ?? "1.0"} value={assistant?.generateSettings?.repetition_penalty ?? ""} /> </label> <label for="top_k" class="flex justify-between"> <span class="m-1 ml-0 flex items-center gap-1.5 whitespace-nowrap text-sm"> Top K <HoverTooltip label="Top K: Restricts word options, lower values for predictability." > <CarbonHelpFilled class="inline text-xxs text-gray-500 group-hover/tooltip:text-blue-600" /> </HoverTooltip> </span> <input type="number" name="top_k" min="5" max="100" step="5" class="w-20 rounded-lg border-2 border-gray-200 bg-gray-100 px-2 py-1" placeholder={selectedModel?.parameters?.top_k?.toString() ?? "50"} value={assistant?.generateSettings?.top_k ?? ""} /> </label> </div> </div> </label> <label> <div class="mb-1 font-semibold">User start messages</div> <div class="grid gap-1.5 text-sm md:grid-cols-2"> <input name="exampleInput1" placeholder="Start Message 1" bind:value={inputMessage1} class="w-full rounded-lg border-2 border-gray-200 bg-gray-100 p-2" /> <input name="exampleInput2" placeholder="Start Message 2" bind:value={inputMessage2} class="w-full rounded-lg border-2 border-gray-200 bg-gray-100 p-2" /> <input name="exampleInput3" placeholder="Start Message 3" bind:value={inputMessage3} class="w-full rounded-lg border-2 border-gray-200 bg-gray-100 p-2" /> <input name="exampleInput4" placeholder="Start Message 4" bind:value={inputMessage4} class="w-full rounded-lg border-2 border-gray-200 bg-gray-100 p-2" /> </div> <p class="text-xs text-red-500">{getError("inputMessage1", form)}</p> </label> {#if $page.data.user?.isEarlyAccess && selectedModel?.tools} <div> <span class="text-smd font-semibold" >Tools <CarbonTools class="inline text-xs text-purple-600" /> <span class="ml-1 rounded bg-gray-100 px-1 py-0.5 text-xxs font-normal text-gray-600" >Experimental</span > </span> <p class="text-xs text-gray-500"> Choose up to 3 community tools that will be used with this assistant. 
</p> </div> <AssistantToolPicker bind:toolIds={tools} /> {/if} {#if $page.data.enableAssistantsRAG} <div class="flex flex-col flex-nowrap pb-4"> <span class="mt-2 text-smd font-semibold" >Internet access <IconInternet classNames="inline text-sm text-blue-600" /> {#if isHuggingChat} <a href="https://huggingface.co/spaces/huggingchat/chat-ui/discussions/385" target="_blank" class="ml-0.5 rounded bg-gray-100 px-1 py-0.5 text-xxs font-normal text-gray-700 underline decoration-gray-400" >Give feedback</a > {/if} </span> <label class="mt-1"> <input checked={!ragMode} on:change={() => (ragMode = false)} type="radio" name="ragMode" value={false} /> <span class="my-2 text-sm" class:font-semibold={!ragMode}> Default </span> {#if !ragMode} <span class="block text-xs text-gray-500"> Assistant will not use internet to do information retrieval and will respond faster. Recommended for most Assistants. </span> {/if} </label> <label class="mt-1"> <input checked={ragMode === "all"} on:change={() => (ragMode = "all")} type="radio" name="ragMode" value={"all"} /> <span class="my-2 text-sm" class:font-semibold={ragMode === "all"}> Web search </span> {#if ragMode === "all"} <span class="block text-xs text-gray-500"> Assistant will do a web search on each user request to find information. </span> {/if} </label> <label class="mt-1"> <input checked={ragMode === "domains"} on:change={() => (ragMode = "domains")} type="radio" name="ragMode" value={false} /> <span class="my-2 text-sm" class:font-semibold={ragMode === "domains"}> Domains search </span> </label> {#if ragMode === "domains"} <span class="mb-2 text-xs text-gray-500"> Specify domains and URLs that the application can search, separated by commas. </span> <input name="ragDomainList" class="w-full rounded-lg border-2 border-gray-200 bg-gray-100 p-2" placeholder="wikipedia.org,bbc.com" value={assistant?.rag?.allowedDomains?.join(",") ?? ""} /> <p class="text-xs text-red-500">{getError("ragDomainList", form)}</p> {/if} <label class="mt-1"> <input checked={ragMode === "links"} on:change={() => (ragMode = "links")} type="radio" name="ragMode" value={false} /> <span class="my-2 text-sm" class:font-semibold={ragMode === "links"}> Specific Links </span> </label> {#if ragMode === "links"} <span class="mb-2 text-xs text-gray-500"> Specify a maximum of 10 direct URLs that the Assistant will access. HTML & Plain Text only, separated by commas </span> <input name="ragLinkList" class="w-full rounded-lg border-2 border-gray-200 bg-gray-100 p-2" placeholder="https://raw.githubusercontent.com/huggingface/chat-ui/main/README.md" value={assistant?.rag?.allowedLinks.join(",") ?? ""} /> <p class="text-xs text-red-500">{getError("ragLinkList", form)}</p> {/if} </div> {/if} </div> <div class="relative col-span-1 flex h-full flex-col"> <div class="mb-1 flex justify-between text-sm"> <span class="block font-semibold"> Instructions (System Prompt) </span> {#if dynamicPrompt && templateVariables.length} <div class="relative"> <button type="button" class="peer rounded bg-blue-500/20 px-1 text-xs text-blue-600 focus:bg-blue-500/30 focus:text-blue-800 sm:text-sm" > {templateVariables.length} template variable{templateVariables.length > 1 ? "s" : ""} </button> <div class="invisible absolute right-0 top-6 z-10 rounded-lg border bg-white p-2 text-xs shadow-lg peer-focus:visible hover:visible sm:w-96" > Will performs a GET request and injects the response into the prompt. Works better with plain text, csv or json content. 
{#each templateVariables as match} <a href={match} target="_blank" class="text-gray-500 underline decoration-gray-300" >{match}</a > {/each} </div> </div> {/if} </div> <label class="pb-2 text-sm has-[:checked]:font-semibold"> <input type="checkbox" name="dynamicPrompt" bind:checked={dynamicPrompt} /> Dynamic Prompt <p class="mb-2 text-xs font-normal text-gray-500"> Allow the use of template variables {"{{url=https://example.com/path}}"} to insert dynamic content into your prompt by making GET requests to specified URLs on each inference. </p> </label> <div class="relative mb-20 flex h-full flex-col gap-2"> <textarea name="preprompt" class="min-h-[8lh] flex-1 rounded-lg border-2 border-gray-200 bg-gray-100 p-2 text-sm" placeholder="You'll act as..." bind:value={systemPrompt} /> {#if modelId} {@const model = models.find((_model) => _model.id === modelId)} {#if model?.tokenizer && systemPrompt} <TokensCounter classNames="absolute bottom-4 right-4" prompt={systemPrompt} modelTokenizer={model.tokenizer} truncate={model?.parameters?.truncate} /> {/if} {/if} <p class="text-xs text-red-500">{getError("preprompt", form)}</p> </div> <div class="absolute bottom-6 flex w-full justify-end gap-2 md:right-0 md:w-fit"> <a href={assistant ? `${base}/settings/assistants/${assistant?._id}` : `${base}/settings`} class="flex items-center justify-center rounded-full bg-gray-200 px-5 py-2 font-semibold text-gray-600" > Cancel </a> <button type="submit" disabled={loading} aria-disabled={loading} class="flex items-center justify-center rounded-full bg-black px-8 py-2 font-semibold" class:bg-gray-200={loading} class:text-gray-600={loading} class:text-white={!loading} > {assistant ? "Save" : "Create"} </button> </div> </div> </div> </form>
chat-ui/src/lib/components/AssistantSettings.svelte/0
{ "file_path": "chat-ui/src/lib/components/AssistantSettings.svelte", "repo_id": "chat-ui", "token_count": 8951 }
61
<script lang="ts"> import CarbonCaretLeft from "~icons/carbon/caret-left"; import CarbonCaretRight from "~icons/carbon/caret-right"; export let href: string; export let direction: "next" | "previous"; export let isDisabled = false; </script> <a class="flex items-center rounded-lg px-2.5 py-1 hover:bg-gray-50 dark:hover:bg-gray-800 {isDisabled ? 'pointer-events-none opacity-50' : ''}" {href} > {#if direction === "previous"} <CarbonCaretLeft classNames="mr-1.5" /> Previous {:else} Next <CarbonCaretRight classNames="ml-1.5" /> {/if} </a>
chat-ui/src/lib/components/PaginationArrow.svelte/0
{ "file_path": "chat-ui/src/lib/components/PaginationArrow.svelte", "repo_id": "chat-ui", "token_count": 226 }
62
<script lang="ts"> import { isDesktop } from "$lib/utils/isDesktop"; import { createEventDispatcher, onMount } from "svelte"; export let value = ""; export let minRows = 1; export let maxRows: null | number = null; export let placeholder = ""; export let disabled = false; let textareaElement: HTMLTextAreaElement; let isCompositionOn = false; const dispatch = createEventDispatcher<{ submit: void }>(); $: minHeight = `${1 + minRows * 1.5}em`; $: maxHeight = maxRows ? `${1 + maxRows * 1.5}em` : `auto`; function handleKeydown(event: KeyboardEvent) { // submit on enter if (event.key === "Enter" && !event.shiftKey && !isCompositionOn) { event.preventDefault(); // blur to close keyboard on mobile textareaElement.blur(); // refocus so that user on desktop can start typing without needing to reclick on textarea if (isDesktop(window)) { textareaElement.focus(); } dispatch("submit"); // use a custom event instead of `event.target.form.requestSubmit()` as it does not work on Safari 14 } } onMount(() => { if (isDesktop(window)) { textareaElement.focus(); } }); </script> <div class="relative min-w-0 flex-1" on:paste> <pre class="scrollbar-custom invisible overflow-x-hidden overflow-y-scroll whitespace-pre-wrap break-words p-3" aria-hidden="true" style="min-height: {minHeight}; max-height: {maxHeight}">{(value || " ") + "\n"}</pre> <textarea enterkeyhint="send" tabindex="0" rows="1" class="scrollbar-custom absolute top-0 m-0 h-full w-full resize-none scroll-p-3 overflow-x-hidden overflow-y-scroll border-0 bg-transparent p-3 outline-none focus:ring-0 focus-visible:ring-0" class:text-gray-400={disabled} bind:value bind:this={textareaElement} {disabled} on:keydown={handleKeydown} on:compositionstart={() => (isCompositionOn = true)} on:compositionend={() => (isCompositionOn = false)} on:beforeinput {placeholder} /> </div> <style> pre, textarea { font-family: inherit; box-sizing: border-box; line-height: 1.5; } </style>
chat-ui/src/lib/components/chat/ChatInput.svelte/0
{ "file_path": "chat-ui/src/lib/components/chat/ChatInput.svelte", "repo_id": "chat-ui", "token_count": 752 }
63
<script lang="ts"> export let src: string; export let name: string; import CarbonPause from "~icons/carbon/pause"; import CarbonPlay from "~icons/carbon/play"; let time = 0; let duration = 0; let paused = true; function format(time: number) { if (isNaN(time)) return "..."; const minutes = Math.floor(time / 60); const seconds = Math.floor(time % 60); return `${minutes}:${seconds < 10 ? `0${seconds}` : seconds}`; } function seek(e: PointerEvent) { if (!e.currentTarget) return; const { left, width } = (e.currentTarget as HTMLElement).getBoundingClientRect(); let p = (e.clientX - left) / width; if (p < 0) p = 0; if (p > 1) p = 1; time = p * duration; } </script> <div class="flex h-14 w-72 items-center gap-4 rounded-2xl border border-gray-200 bg-white p-2.5 text-gray-600 shadow-sm transition-all dark:border-gray-800 dark:bg-gray-900 dark:text-gray-300" > <audio {src} bind:currentTime={time} bind:duration bind:paused preload="metadata" on:ended={() => { time = 0; }} /> <button class="mx-auto my-auto aspect-square size-8 rounded-full border border-gray-400 bg-gray-100 dark:border-gray-800 dark:bg-gray-700" aria-label={paused ? "play" : "pause"} on:click={() => (paused = !paused)} > {#if paused} <CarbonPlay class="mx-auto my-auto text-gray-600 dark:text-gray-300" /> {:else} <CarbonPause class="mx-auto my-auto text-gray-600 dark:text-gray-300" /> {/if} </button> <div class="overflow-hidden"> <div class="truncate font-medium">{name}</div> {#if duration !== Infinity} <div class="flex items-center gap-2"> <span class="text-xs">{format(time)}</span> <div class="relative h-2 flex-1 rounded-full bg-gray-200 dark:bg-gray-700" on:pointerdown={() => { paused = true; }} on:pointerup={seek} > <div class="absolute inset-0 h-full bg-gray-400 dark:bg-gray-600" style="width: {(time / duration) * 100}%" /> </div> <span class="text-xs">{duration ? format(duration) : "--:--"}</span> </div> {/if} </div> </div>
chat-ui/src/lib/components/players/AudioPlayer.svelte/0
{ "file_path": "chat-ui/src/lib/components/players/AudioPlayer.svelte", "repo_id": "chat-ui", "token_count": 877 }
64
import { Issuer, type BaseClient, type UserinfoResponse, type TokenSet, custom, } from "openid-client"; import { addHours, addWeeks } from "date-fns"; import { env } from "$env/dynamic/private"; import { sha256 } from "$lib/utils/sha256"; import { z } from "zod"; import { dev } from "$app/environment"; import type { Cookies } from "@sveltejs/kit"; import { collections } from "$lib/server/database"; import JSON5 from "json5"; import { logger } from "$lib/server/logger"; export interface OIDCSettings { redirectURI: string; } export interface OIDCUserInfo { token: TokenSet; userData: UserinfoResponse; } const stringWithDefault = (value: string) => z .string() .default(value) .transform((el) => (el ? el : value)); export const OIDConfig = z .object({ CLIENT_ID: stringWithDefault(env.OPENID_CLIENT_ID), CLIENT_SECRET: stringWithDefault(env.OPENID_CLIENT_SECRET), PROVIDER_URL: stringWithDefault(env.OPENID_PROVIDER_URL), SCOPES: stringWithDefault(env.OPENID_SCOPES), NAME_CLAIM: stringWithDefault(env.OPENID_NAME_CLAIM).refine( (el) => !["preferred_username", "email", "picture", "sub"].includes(el), { message: "nameClaim cannot be one of the restricted keys." } ), TOLERANCE: stringWithDefault(env.OPENID_TOLERANCE), RESOURCE: stringWithDefault(env.OPENID_RESOURCE), }) .parse(JSON5.parse(env.OPENID_CONFIG)); export const requiresUser = !!OIDConfig.CLIENT_ID && !!OIDConfig.CLIENT_SECRET; export function refreshSessionCookie(cookies: Cookies, sessionId: string) { cookies.set(env.COOKIE_NAME, sessionId, { path: "/", // So that it works inside the space's iframe sameSite: dev || env.ALLOW_INSECURE_COOKIES === "true" ? "lax" : "none", secure: !dev && !(env.ALLOW_INSECURE_COOKIES === "true"), httpOnly: true, expires: addWeeks(new Date(), 2), }); } export async function findUser(sessionId: string) { const session = await collections.sessions.findOne({ sessionId }); if (!session) { return null; } return await collections.users.findOne({ _id: session.userId }); } export const authCondition = (locals: App.Locals) => { return locals.user ? { userId: locals.user._id } : { sessionId: locals.sessionId, userId: { $exists: false } }; }; /** * Generates a CSRF token using the user sessionId. Note that we don't need a secret because sessionId is enough. 
*/ export async function generateCsrfToken(sessionId: string, redirectUrl: string): Promise<string> { const data = { expiration: addHours(new Date(), 1).getTime(), redirectUrl, }; return Buffer.from( JSON.stringify({ data, signature: await sha256(JSON.stringify(data) + "##" + sessionId), }) ).toString("base64"); } async function getOIDCClient(settings: OIDCSettings): Promise<BaseClient> { const issuer = await Issuer.discover(OIDConfig.PROVIDER_URL); return new issuer.Client({ client_id: OIDConfig.CLIENT_ID, client_secret: OIDConfig.CLIENT_SECRET, redirect_uris: [settings.redirectURI], response_types: ["code"], [custom.clock_tolerance]: OIDConfig.TOLERANCE || undefined, }); } export async function getOIDCAuthorizationUrl( settings: OIDCSettings, params: { sessionId: string } ): Promise<string> { const client = await getOIDCClient(settings); const csrfToken = await generateCsrfToken(params.sessionId, settings.redirectURI); return client.authorizationUrl({ scope: OIDConfig.SCOPES, state: csrfToken, resource: OIDConfig.RESOURCE || undefined, }); } export async function getOIDCUserData( settings: OIDCSettings, code: string, iss?: string ): Promise<OIDCUserInfo> { const client = await getOIDCClient(settings); const token = await client.callback(settings.redirectURI, { code, iss }); const userData = await client.userinfo(token); return { token, userData }; } export async function validateAndParseCsrfToken( token: string, sessionId: string ): Promise<{ /** This is the redirect url that was passed to the OIDC provider */ redirectUrl: string; } | null> { try { const { data, signature } = z .object({ data: z.object({ expiration: z.number().int(), redirectUrl: z.string().url(), }), signature: z.string().length(64), }) .parse(JSON.parse(token)); const reconstructSign = await sha256(JSON.stringify(data) + "##" + sessionId); if (data.expiration > Date.now() && signature === reconstructSign) { return { redirectUrl: data.redirectUrl }; } } catch (e) { logger.error(e); } return null; }
chat-ui/src/lib/server/auth.ts/0
{ "file_path": "chat-ui/src/lib/server/auth.ts", "repo_id": "chat-ui", "token_count": 1593 }
65
import { GoogleGenerativeAI, HarmBlockThreshold, HarmCategory } from "@google/generative-ai"; import type { Content, Part, TextPart } from "@google/generative-ai"; import { z } from "zod"; import type { Message, MessageFile } from "$lib/types/Message"; import type { TextGenerationStreamOutput } from "@huggingface/inference"; import type { Endpoint } from "../endpoints"; import { createImageProcessorOptionsValidator, makeImageProcessor } from "../images"; import type { ImageProcessorOptions } from "../images"; export const endpointGenAIParametersSchema = z.object({ weight: z.number().int().positive().default(1), model: z.any(), type: z.literal("genai"), apiKey: z.string(), safetyThreshold: z .enum([ HarmBlockThreshold.HARM_BLOCK_THRESHOLD_UNSPECIFIED, HarmBlockThreshold.BLOCK_LOW_AND_ABOVE, HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE, HarmBlockThreshold.BLOCK_NONE, HarmBlockThreshold.BLOCK_ONLY_HIGH, ]) .optional(), multimodal: z .object({ image: createImageProcessorOptionsValidator({ supportedMimeTypes: ["image/png", "image/jpeg", "image/webp"], preferredMimeType: "image/webp", // The 4 / 3 compensates for the 33% increase in size when converting to base64 maxSizeInMB: (5 / 4) * 3, maxWidth: 4096, maxHeight: 4096, }), }) .default({}), }); export function endpointGenAI(input: z.input<typeof endpointGenAIParametersSchema>): Endpoint { const { model, apiKey, safetyThreshold, multimodal } = endpointGenAIParametersSchema.parse(input); const genAI = new GoogleGenerativeAI(apiKey); return async ({ messages, preprompt, generateSettings }) => { const parameters = { ...model.parameters, ...generateSettings }; const generativeModel = genAI.getGenerativeModel({ model: model.id ?? model.name, safetySettings: safetyThreshold ? [ { category: HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT, threshold: safetyThreshold, }, { category: HarmCategory.HARM_CATEGORY_HARASSMENT, threshold: safetyThreshold, }, { category: HarmCategory.HARM_CATEGORY_HATE_SPEECH, threshold: safetyThreshold, }, { category: HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT, threshold: safetyThreshold, }, { category: HarmCategory.HARM_CATEGORY_UNSPECIFIED, threshold: safetyThreshold, }, ] : undefined, generationConfig: { maxOutputTokens: parameters?.max_new_tokens ?? 4096, stopSequences: parameters?.stop, temperature: parameters?.temperature ?? 1, }, }); let systemMessage = preprompt; if (messages[0].from === "system") { systemMessage = messages[0].content; messages.shift(); } const genAIMessages = await Promise.all( messages.map(async ({ from, content, files }: Omit<Message, "id">): Promise<Content> => { return { role: from === "user" ? "user" : "model", parts: [ ...(await Promise.all( (files ?? []).map((file) => fileToImageBlock(file, multimodal.image)) )), { text: content }, ], }; }) ); const result = await generativeModel.generateContentStream({ contents: genAIMessages, systemInstruction: systemMessage && systemMessage.trim() !== "" ? 
{ role: "system", parts: [{ text: systemMessage }], } : undefined, }); let tokenId = 0; return (async function* () { let generatedText = ""; for await (const data of result.stream) { if (!data?.candidates?.length) break; // Handle case where no candidates are present const candidate = data.candidates[0]; if (!candidate.content?.parts?.length) continue; // Skip if no parts are present const firstPart = candidate.content.parts.find((part) => "text" in part) as | TextPart | undefined; if (!firstPart) continue; // Skip if no text part is found const content = firstPart.text; generatedText += content; const output: TextGenerationStreamOutput = { token: { id: tokenId++, text: content, logprob: 0, special: false, }, generated_text: null, details: null, }; yield output; } const output: TextGenerationStreamOutput = { token: { id: tokenId++, text: "", logprob: 0, special: true, }, generated_text: generatedText, details: null, }; yield output; })(); }; } async function fileToImageBlock( file: MessageFile, opts: ImageProcessorOptions<"image/png" | "image/jpeg" | "image/webp"> ): Promise<Part> { const processor = makeImageProcessor(opts); const { image, mime } = await processor(file); return { inlineData: { mimeType: mime, data: image.toString("base64"), }, }; } export default endpointGenAI;
chat-ui/src/lib/server/endpoints/google/endpointGenAI.ts/0
{ "file_path": "chat-ui/src/lib/server/endpoints/google/endpointGenAI.ts", "repo_id": "chat-ui", "token_count": 1928 }
66
import { Address6, Address4 } from "ip-address";
import dns from "node:dns";

const dnsLookup = (hostname: string): Promise<{ address: string; family: number }> => {
	return new Promise((resolve, reject) => {
		dns.lookup(hostname, (err, address, family) => {
			if (err) return reject(err);
			resolve({ address, family });
		});
	});
};

export async function isURLLocal(URL: URL): Promise<boolean> {
	const { address, family } = await dnsLookup(URL.hostname);

	if (family === 4) {
		const addr = new Address4(address);
		const localSubnet = new Address4("127.0.0.0/8");
		return addr.isInSubnet(localSubnet);
	}

	if (family === 6) {
		const addr = new Address6(address);
		return addr.isLoopback() || addr.isInSubnet(new Address6("::1/128")) || addr.isLinkLocal();
	}

	throw Error("Unknown IP family");
}

export function isURLStringLocal(url: string) {
	try {
		const urlObj = new URL(url);
		return isURLLocal(urlObj);
	} catch (e) {
		// assume local if URL parsing fails
		return true;
	}
}
chat-ui/src/lib/server/isURLLocal.ts/0
{ "file_path": "chat-ui/src/lib/server/isURLLocal.ts", "repo_id": "chat-ui", "token_count": 363 }
67
import type { ConfigTool } from "$lib/types/Tool"; import { ObjectId } from "mongodb"; import { runWebSearch } from "../../websearch/runWebSearch"; const websearch: ConfigTool = { _id: new ObjectId("00000000000000000000000A"), type: "config", description: "Search the web for answers to the user's query", color: "blue", icon: "wikis", displayName: "Web Search", name: "websearch", endpoint: null, inputs: [ { name: "query", type: "str", description: "A search query which will be used to fetch the most relevant snippets regarding the user's query", paramType: "required", }, ], outputComponent: null, outputComponentIdx: null, showOutput: false, async *call({ query }, { conv, assistant, messages }) { const webSearchToolResults = yield* runWebSearch(conv, messages, assistant?.rag, String(query)); const chunks = webSearchToolResults?.contextSources .map(({ context }) => context) .join("\n------------\n"); return { outputs: [{ websearch: chunks }], display: false, }; }, }; export default websearch;
chat-ui/src/lib/server/tools/web/search.ts/0
{ "file_path": "chat-ui/src/lib/server/tools/web/search.ts", "repo_id": "chat-ui", "token_count": 358 }
68
export interface SerializedHTMLElement {
	tagName: string;
	attributes: Record<string, string>;
	content: (SerializedHTMLElement | string)[];
}
chat-ui/src/lib/server/websearch/scrape/types.ts/0
{ "file_path": "chat-ui/src/lib/server/websearch/scrape/types.ts", "repo_id": "chat-ui", "token_count": 46 }
69
import { writable } from "svelte/store";

export const isAborted = writable<boolean>(false);
chat-ui/src/lib/stores/isAborted.ts/0
{ "file_path": "chat-ui/src/lib/stores/isAborted.ts", "repo_id": "chat-ui", "token_count": 30 }
70
import type { BackendModel } from "$lib/server/models";

export type Model = Pick<
	BackendModel,
	| "id"
	| "name"
	| "displayName"
	| "websiteUrl"
	| "datasetName"
	| "promptExamples"
	| "parameters"
	| "description"
	| "logoUrl"
	| "modelUrl"
	| "tokenizer"
	| "datasetUrl"
	| "preprompt"
	| "multimodal"
	| "unlisted"
	| "tools"
>;
chat-ui/src/lib/types/Model.ts/0
{ "file_path": "chat-ui/src/lib/types/Model.ts", "repo_id": "chat-ui", "token_count": 151 }
71
export function deepestChild(el: HTMLElement): HTMLElement {
	if (el.lastElementChild && el.lastElementChild.nodeType !== Node.TEXT_NODE) {
		return deepestChild(el.lastElementChild as HTMLElement);
	}
	return el;
}
chat-ui/src/lib/utils/deepestChild.ts/0
{ "file_path": "chat-ui/src/lib/utils/deepestChild.ts", "repo_id": "chat-ui", "token_count": 74 }
72
const PUNCTUATION_REGEX = /\p{P}/gu;

function removeDiacritics(s: string, form: "NFD" | "NFKD" = "NFD"): string {
	return s.normalize(form).replace(/[\u0300-\u036f]/g, "");
}

export function generateSearchTokens(value: string): string[] {
	const fullTitleToken = removeDiacritics(value)
		.replace(PUNCTUATION_REGEX, "")
		.replaceAll(/\s+/g, "")
		.toLowerCase();
	return [
		...new Set([
			...removeDiacritics(value)
				.split(/\s+/)
				.map((word) => word.replace(PUNCTUATION_REGEX, "").toLowerCase())
				.filter((word) => word.length),
			...(fullTitleToken.length ? [fullTitleToken] : []),
		]),
	];
}

function escapeForRegExp(s: string): string {
	return s.replace(/[.*+?^${}()|[\]\\]/g, "\\$&"); // $& means the whole matched string
}

export function generateQueryTokens(query: string): RegExp[] {
	return removeDiacritics(query)
		.split(/\s+/)
		.map((word) => word.replace(PUNCTUATION_REGEX, "").toLowerCase())
		.filter((word) => word.length)
		.map((token) => new RegExp(`^${escapeForRegExp(token)}`));
}
chat-ui/src/lib/utils/searchTokens.ts/0
{ "file_path": "chat-ui/src/lib/utils/searchTokens.ts", "repo_id": "chat-ui", "token_count": 426 }
73
import { describe, expect, it } from "vitest";
import { isMessageId } from "./isMessageId";
import { v4 } from "uuid";

describe("isMessageId", () => {
	it("should return true for a valid message id", () => {
		expect(isMessageId(v4())).toBe(true);
	});

	it("should return false for an invalid message id", () => {
		expect(isMessageId("1-2-3-4")).toBe(false);
	});

	it("should return false for an empty string", () => {
		expect(isMessageId("")).toBe(false);
	});
});
chat-ui/src/lib/utils/tree/isMessageId.spec.ts/0
{ "file_path": "chat-ui/src/lib/utils/tree/isMessageId.spec.ts", "repo_id": "chat-ui", "token_count": 170 }
74
import { collections } from "$lib/server/database.js"; import { toolFromConfigs } from "$lib/server/tools/index.js"; import type { BaseTool, CommunityToolDB } from "$lib/types/Tool.js"; import { generateQueryTokens, generateSearchTokens } from "$lib/utils/searchTokens.js"; import type { Filter } from "mongodb"; export async function GET({ url, locals }) { // XXX: feature_flag_tools if (!locals.user?.isEarlyAccess) { return new Response("Not early access", { status: 403 }); } const query = url.searchParams.get("q")?.trim() ?? null; const queryTokens = !!query && generateQueryTokens(query); const filter: Filter<CommunityToolDB> = { ...(queryTokens && { searchTokens: { $all: queryTokens } }), featured: true, }; const matchingCommunityTools = await collections.tools .find(filter) .project<Pick<BaseTool, "_id" | "displayName" | "color" | "icon">>({ _id: 1, displayName: 1, color: 1, icon: 1, createdByName: 1, }) .sort({ useCount: -1 }) .limit(5) .toArray(); const matchingConfigTools = toolFromConfigs .filter((tool) => !tool?.isHidden) .filter((tool) => { if (queryTokens) { return generateSearchTokens(tool.displayName).some((token) => queryTokens.some((queryToken) => queryToken.test(token)) ); } return true; }) .map((tool) => ({ _id: tool._id, displayName: tool.displayName, color: tool.color, icon: tool.icon, createdByName: undefined, })); const tools = [...matchingConfigTools, ...matchingCommunityTools] satisfies Array< Pick<BaseTool, "_id" | "displayName" | "color" | "icon"> & { createdByName?: string } >; return Response.json(tools.map((tool) => ({ ...tool, _id: tool._id.toString() })).slice(0, 5)); }
chat-ui/src/routes/api/tools/search/+server.ts/0
{ "file_path": "chat-ui/src/routes/api/tools/search/+server.ts", "repo_id": "chat-ui", "token_count": 626 }
75
import { authCondition } from "$lib/server/auth"; import { collections } from "$lib/server/database"; import type { SharedConversation } from "$lib/types/SharedConversation"; import { getShareUrl } from "$lib/utils/getShareUrl"; import { hashConv } from "$lib/utils/hashConv"; import { error } from "@sveltejs/kit"; import { ObjectId } from "mongodb"; import { nanoid } from "nanoid"; export async function POST({ params, url, locals }) { const conversation = await collections.conversations.findOne({ _id: new ObjectId(params.id), ...authCondition(locals), }); if (!conversation) { error(404, "Conversation not found"); } const hash = await hashConv(conversation); const existingShare = await collections.sharedConversations.findOne({ hash }); if (existingShare) { return new Response( JSON.stringify({ url: getShareUrl(url, existingShare._id), }), { headers: { "Content-Type": "application/json" } } ); } const shared: SharedConversation = { _id: nanoid(7), hash, createdAt: new Date(), updatedAt: new Date(), rootMessageId: conversation.rootMessageId, messages: conversation.messages, title: conversation.title, model: conversation.model, embeddingModel: conversation.embeddingModel, preprompt: conversation.preprompt, assistantId: conversation.assistantId, }; await collections.sharedConversations.insertOne(shared); // copy files from `${conversation._id}-` to `${shared._id}-` const files = await collections.bucket .find({ filename: { $regex: `${conversation._id}-` } }) .toArray(); await Promise.all( files.map(async (file) => { const newFilename = file.filename.replace(`${conversation._id}-`, `${shared._id}-`); // copy files from `${conversation._id}-` to `${shared._id}-` by downloading and reuploaidng const downloadStream = collections.bucket.openDownloadStream(file._id); const uploadStream = collections.bucket.openUploadStream(newFilename, { metadata: { ...file.metadata, conversation: shared._id.toString() }, }); downloadStream.pipe(uploadStream); }) ); return new Response( JSON.stringify({ url: getShareUrl(url, shared._id), }), { headers: { "Content-Type": "application/json" } } ); }
chat-ui/src/routes/conversation/[id]/share/+server.ts/0
{ "file_path": "chat-ui/src/routes/conversation/[id]/share/+server.ts", "repo_id": "chat-ui", "token_count": 760 }
76
<script lang="ts"> import { onMount } from "svelte"; import { base } from "$app/paths"; import { afterNavigate, goto } from "$app/navigation"; import { page } from "$app/stores"; import { useSettingsStore } from "$lib/stores/settings"; import CarbonClose from "~icons/carbon/close"; import CarbonArrowUpRight from "~icons/carbon/ArrowUpRight"; import CarbonAdd from "~icons/carbon/add"; import CarbonTextLongParagraph from "~icons/carbon/text-long-paragraph"; import UserIcon from "~icons/carbon/user"; import type { LayoutData } from "../$types"; export let data: LayoutData; let previousPage: string = base; let assistantsSection: HTMLHeadingElement; onMount(() => { if ($page.params?.assistantId) { assistantsSection.scrollIntoView(); } }); afterNavigate(({ from }) => { if (!from?.url.pathname.includes("settings")) { previousPage = from?.url.toString() || previousPage; } }); const settings = useSettingsStore(); </script> <div class="grid h-full w-full grid-cols-1 grid-rows-[auto,1fr] content-start gap-x-4 overflow-hidden p-4 md:grid-cols-3 md:grid-rows-[auto,1fr] md:p-8" > <div class="col-span-1 mb-4 flex items-center justify-between md:col-span-3"> <h2 class="text-xl font-bold">Settings</h2> <button class="btn rounded-lg" on:click={() => { goto(previousPage); }} > <CarbonClose class="text-xl text-gray-900 hover:text-black" /> </button> </div> <div class="col-span-1 flex flex-col overflow-y-auto whitespace-nowrap max-md:-mx-4 max-md:h-[245px] max-md:border max-md:border-b-2 md:pr-6" > <h3 class="pb-3 pl-3 pt-2 text-[.8rem] text-gray-800 sm:pl-1">Models</h3> {#each data.models.filter((el) => !el.unlisted) as model} <a href="{base}/settings/{model.id}" class="group flex h-10 flex-none items-center gap-2 pl-3 pr-2 text-sm text-gray-500 hover:bg-gray-100 md:rounded-xl {model.id === $page.params.model ? '!bg-gray-100 !text-gray-800' : ''}" > <div class="mr-auto truncate">{model.displayName}</div> {#if $settings.customPrompts?.[model.id]} <CarbonTextLongParagraph class="size-6 rounded-md border border-gray-300 p-1 text-gray-800" /> {/if} {#if model.id === $settings.activeModel} <div class="rounded-lg bg-black px-2 py-1.5 text-xs font-semibold leading-none text-white" > Active </div> {/if} </a> {/each} <!-- if its huggingchat, the number of assistants owned by the user must be non-zero to show the UI --> {#if data.enableAssistants} <h3 bind:this={assistantsSection} class="pl-3 pt-5 text-[.8rem] text-gray-800 sm:pl-1"> Assistants </h3> <!-- My Assistants --> <h4 class="py-2 pl-5 text-[.7rem] text-gray-600 sm:pl-1">My Assistants</h4> {#each data.assistants.filter((assistant) => assistant.createdByMe) as assistant} <a href="{base}/settings/assistants/{assistant._id.toString()}" class="group flex h-10 flex-none items-center gap-2 pl-2 pr-2 text-sm text-gray-500 hover:bg-gray-100 md:rounded-xl {assistant._id.toString() === $page.params.assistantId ? 
'!bg-gray-100 !text-gray-800' : ''}" > {#if assistant.avatar} <img src="{base}/settings/assistants/{assistant._id.toString()}/avatar.jpg?hash={assistant.avatar}" alt="Avatar" class="h-6 w-6 rounded-full" /> {:else} <div class="flex size-6 items-center justify-center rounded-full bg-gray-300 font-bold uppercase text-gray-500" > {assistant.name[0]} </div> {/if} <div class="truncate text-gray-900">{assistant.name}</div> {#if assistant._id.toString() === $settings.activeModel} <div class="ml-auto rounded-lg bg-black px-2 py-1.5 text-xs font-semibold leading-none text-white" > Active </div> {/if} </a> {/each} {#if !data.loginEnabled || (data.loginEnabled && !!data.user)} <a href="{base}/settings/assistants/new" class="group flex h-10 flex-none items-center gap-2 pl-3 pr-2 text-sm text-gray-500 hover:bg-gray-100 md:rounded-xl {$page.url.pathname === `${base}/settings/assistants/new` ? '!bg-gray-100 !text-gray-800' : ''}" > <CarbonAdd /> <div class="truncate">Create new assistant</div> </a> {/if} <!-- Other Assistants --> <h4 class="pl-3 pt-3 text-[.7rem] text-gray-600 sm:pl-1">Other Assistants</h4> {#each data.assistants.filter((assistant) => !assistant.createdByMe) as assistant} <a href="{base}/settings/assistants/{assistant._id.toString()}" class="group flex h-10 flex-none items-center gap-2 pl-2 pr-2 text-sm text-gray-500 hover:bg-gray-100 md:rounded-xl {assistant._id.toString() === $page.params.assistantId ? '!bg-gray-100 !text-gray-800' : ''}" > {#if assistant.avatar} <img src="{base}/settings/assistants/{assistant._id.toString()}/avatar.jpg?hash={assistant.avatar}" alt="Avatar" class="h-6 w-6 rounded-full" /> {:else} <div class="flex size-6 items-center justify-center rounded-full bg-gray-300 font-bold uppercase text-gray-500" > {assistant.name[0]} </div> {/if} <div class="truncate">{assistant.name}</div> {#if assistant._id.toString() === $settings.activeModel} <div class="ml-auto rounded-lg bg-black px-2 py-1.5 text-xs font-semibold leading-none text-white" > Active </div> {/if} </a> {/each} <a href="{base}/assistants" class="group flex h-10 flex-none items-center gap-2 pl-3 pr-2 text-sm text-gray-500 hover:bg-gray-100 md:rounded-xl" ><CarbonArrowUpRight class="mr-1.5 shrink-0 text-xs " /> <div class="truncate">Browse Assistants</div> </a> {/if} <div class="my-2 mt-auto w-full border-b border-gray-200" /> <a href="{base}/settings" class="group flex h-10 flex-none items-center gap-2 pl-3 pr-2 text-sm text-gray-500 hover:bg-gray-100 max-md:order-first md:rounded-xl {$page.url.pathname === `${base}/settings` ? '!bg-gray-100 !text-gray-800' : ''}" > <UserIcon class="text-sm" /> Application Settings </a> </div> <div class="col-span-1 w-full overflow-y-auto overflow-x-clip px-1 max-md:pt-4 md:col-span-2 md:row-span-2" > <slot /> </div> </div>
chat-ui/src/routes/settings/(nav)/+layout.svelte/0
{ "file_path": "chat-ui/src/routes/settings/(nav)/+layout.svelte", "repo_id": "chat-ui", "token_count": 2812 }
77
<slot />
chat-ui/src/routes/tools/+layout.svelte/0
{ "file_path": "chat-ui/src/routes/tools/+layout.svelte", "repo_id": "chat-ui", "token_count": 4 }
78
{ "background_color": "#ffffff", "name": "HuggingChat", "short_name": "HuggingChat", "display": "standalone", "start_url": "/chat", "icons": [ { "src": "/chat/huggingchat/icon-36x36.png", "sizes": "36x36", "type": "image/png" }, { "src": "/chat/huggingchat/icon-48x48.png", "sizes": "48x48", "type": "image/png" }, { "src": "/chat/huggingchat/icon-72x72.png", "sizes": "72x72", "type": "image/png" }, { "src": "/chat/huggingchat/icon-96x96.png", "sizes": "96x96", "type": "image/png" }, { "src": "/chat/huggingchat/icon-128x128.png", "sizes": "128x128", "type": "image/png" }, { "src": "/chat/huggingchat/icon-144x144.png", "sizes": "144x144", "type": "image/png" }, { "src": "/chat/huggingchat/icon-192x192.png", "sizes": "192x192", "type": "image/png" }, { "src": "/chat/huggingchat/icon-256x256.png", "sizes": "256x256", "type": "image/png" }, { "src": "/chat/huggingchat/icon-512x512.png", "sizes": "512x512", "type": "image/png" } ] }
chat-ui/static/huggingchat/manifest.json/0
{ "file_path": "chat-ui/static/huggingchat/manifest.json", "repo_id": "chat-ui", "token_count": 569 }
79
# This first_section was backported from nginx
loading_datasets: loading
share_dataset: share
quicktour: quickstart
dataset_streaming: stream
torch_tensorflow: use_dataset
splits: loading#slice-splits
processing: process
faiss_and_ea: faiss_es
features: about_dataset_features
exploring: access
package_reference/logging_methods: package_reference/utilities
# end of first_section
datasets/docs/source/_redirects.yml/0
{ "file_path": "datasets/docs/source/_redirects.yml", "repo_id": "datasets", "token_count": 122 }
80
# Create a dataset loading script

<Tip>

The dataset loading script is likely not needed if your dataset is in one of the following formats: CSV, JSON, JSON lines, text, images, audio or Parquet. With those formats, you should be able to load your dataset automatically with [`~datasets.load_dataset`], as long as your dataset repository has a [required structure](./repository_structure).

</Tip>

<Tip warning={true}>

For security reasons, 🤗 Datasets does not allow running dataset loading scripts by default, and you have to pass `trust_remote_code=True` to load datasets that require running a dataset script.

</Tip>

Write a dataset script to load and share datasets that consist of data files in unsupported formats or require more complex data preparation. This is a more advanced way to define a dataset than using [YAML metadata in the dataset card](./repository_structure#define-your-splits-in-yaml). A dataset script is a Python file that defines the different configurations and splits of your dataset, as well as how to download and process the data.

The script can download data files from any website, or from the same dataset repository.

A dataset loading script should have the same name as a dataset repository or directory. For example, a repository named `my_dataset` should contain a `my_dataset.py` script. This way it can be loaded with:

```
my_dataset/
├── README.md
└── my_dataset.py
```

```py
>>> from datasets import load_dataset
>>> load_dataset("path/to/my_dataset")
```

The following guide includes instructions for dataset scripts for how to:

- Add dataset metadata.
- Download data files.
- Generate samples.
- Generate dataset metadata.
- Upload a dataset to the Hub.

Open the [SQuAD dataset loading script](https://huggingface.co/datasets/squad/blob/main/squad.py) template to follow along on how to share a dataset.

<Tip>

To help you get started, try beginning with the dataset loading script [template](https://github.com/huggingface/datasets/blob/main/templates/new_dataset_script.py)!

</Tip>

## Add dataset attributes

The first step is to add some information, or attributes, about your dataset in [`DatasetBuilder._info`]. The most important attributes you should specify are:

1. `DatasetInfo.description` provides a concise description of your dataset. The description informs the user what's in the dataset, how it was collected, and how it can be used for an NLP task.

2. `DatasetInfo.features` defines the name and type of each column in your dataset. This will also provide the structure for each example, so it is possible to create nested subfields in a column if you want. Take a look at [`Features`] for a full list of feature types you can use.

```py
datasets.Features(
    {
        "id": datasets.Value("string"),
        "title": datasets.Value("string"),
        "context": datasets.Value("string"),
        "question": datasets.Value("string"),
        "answers": datasets.Sequence(
            {
                "text": datasets.Value("string"),
                "answer_start": datasets.Value("int32"),
            }
        ),
    }
)
```

3. `DatasetInfo.homepage` contains the URL to the dataset homepage so users can find more details about the dataset.

4. `DatasetInfo.citation` contains a BibTeX citation for the dataset.

After you've filled out all these fields in the template, it should look like the following example from the SQuAD loading script:

```py
def _info(self):
    return datasets.DatasetInfo(
        description=_DESCRIPTION,
        features=datasets.Features(
            {
                "id": datasets.Value("string"),
                "title": datasets.Value("string"),
                "context": datasets.Value("string"),
                "question": datasets.Value("string"),
                "answers": datasets.features.Sequence(
                    {"text": datasets.Value("string"), "answer_start": datasets.Value("int32"),}
                ),
            }
        ),
        # No default supervised_keys (as we have to pass both question
        # and context as input).
        supervised_keys=None,
        homepage="https://rajpurkar.github.io/SQuAD-explorer/",
        citation=_CITATION,
    )
```

### Multiple configurations

In some cases, your dataset may have multiple configurations. For example, the [SuperGLUE](https://huggingface.co/datasets/super_glue) dataset is a collection of 5 datasets designed to evaluate language understanding tasks. 🤗 Datasets provides [`BuilderConfig`] which allows you to create different configurations for the user to select from.

Let's study the [SuperGLUE loading script](https://huggingface.co/datasets/super_glue/blob/main/super_glue.py) to see how you can define several configurations.

1. Create a [`BuilderConfig`] subclass with attributes about your dataset. These attributes can be the features of your dataset, label classes, and a URL to the data files.

```py
class SuperGlueConfig(datasets.BuilderConfig):
    """BuilderConfig for SuperGLUE."""

    def __init__(self, features, data_url, citation, url, label_classes=("False", "True"), **kwargs):
        """BuilderConfig for SuperGLUE.

        Args:
          features: *list[string]*, list of the features that will appear in the
            feature dict. Should not include "label".
          data_url: *string*, url to download the zip file from.
          citation: *string*, citation for the data set.
          url: *string*, url for information about the data set.
          label_classes: *list[string]*, the list of classes for the label if the
            label is present as a string. Non-string labels will be cast to either
            'False' or 'True'.
          **kwargs: keyword arguments forwarded to super.
        """
        # Version history:
        # 1.0.2: Fixed non-nondeterminism in ReCoRD.
        # 1.0.1: Change from the pre-release trial version of SuperGLUE (v1.9) to
        #        the full release (v2.0).
        # 1.0.0: S3 (new shuffling, sharding and slicing mechanism).
        # 0.0.2: Initial version.
        super().__init__(version=datasets.Version("1.0.2"), **kwargs)
        self.features = features
        self.label_classes = label_classes
        self.data_url = data_url
        self.citation = citation
        self.url = url
```

2. Create instances of your config to specify the values of the attributes of each configuration. This gives you the flexibility to specify the name and description of each configuration. These sub-class instances should be listed under `DatasetBuilder.BUILDER_CONFIGS`:

```py
class SuperGlue(datasets.GeneratorBasedBuilder):
    """The SuperGLUE benchmark."""

    BUILDER_CONFIG_CLASS = SuperGlueConfig

    BUILDER_CONFIGS = [
        SuperGlueConfig(
            name="boolq",
            description=_BOOLQ_DESCRIPTION,
            features=["question", "passage"],
            data_url="https://dl.fbaipublicfiles.com/glue/superglue/data/v2/BoolQ.zip",
            citation=_BOOLQ_CITATION,
            url="https://github.com/google-research-datasets/boolean-questions",
        ),
        ...
        ...
        SuperGlueConfig(
            name="axg",
            description=_AXG_DESCRIPTION,
            features=["premise", "hypothesis"],
            label_classes=["entailment", "not_entailment"],
            data_url="https://dl.fbaipublicfiles.com/glue/superglue/data/v2/AX-g.zip",
            citation=_AXG_CITATION,
            url="https://github.com/rudinger/winogender-schemas",
        ),
```

3. Now, users can load a specific configuration of the dataset with the configuration `name`:

```py
>>> from datasets import load_dataset
>>> dataset = load_dataset('super_glue', 'boolq')
```

Additionally, users can instantiate a custom builder configuration by passing the builder configuration arguments to [`load_dataset`]:

```py
>>> from datasets import load_dataset
>>> dataset = load_dataset('super_glue', data_url="https://custom_url")
```

### Default configurations

Users must specify a configuration name when they load a dataset with multiple configurations. Otherwise, 🤗 Datasets will raise a `ValueError`, and prompt the user to select a configuration name. You can avoid this by setting a default dataset configuration with the `DEFAULT_CONFIG_NAME` attribute:

```py
class NewDataset(datasets.GeneratorBasedBuilder):

    VERSION = datasets.Version("1.1.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="first_domain", version=VERSION, description="This part of my dataset covers a first domain"),
        datasets.BuilderConfig(name="second_domain", version=VERSION, description="This part of my dataset covers a second domain"),
    ]

    DEFAULT_CONFIG_NAME = "first_domain"
```

<Tip warning={true}>

Only use a default configuration when it makes sense. Don't set one because it may be more convenient for the user to not specify a configuration when they load your dataset. For example, multi-lingual datasets often have a separate configuration for each language. An appropriate default may be an aggregated configuration that loads all the languages of the dataset if the user doesn't request a particular one.

</Tip>

## Download data files and organize splits

After you've defined the attributes of your dataset, the next step is to download the data files and organize them according to their splits.

1. Create a dictionary of URLs in the loading script that point to the original SQuAD data files:

```py
_URL = "https://rajpurkar.github.io/SQuAD-explorer/dataset/"
_URLS = {
    "train": _URL + "train-v1.1.json",
    "dev": _URL + "dev-v1.1.json",
}
```

<Tip>

If the data files live in the same folder or repository of the dataset script, you can just pass the relative paths to the files instead of URLs.

</Tip>

2. [`DownloadManager.download_and_extract`] takes this dictionary and downloads the data files. Once the files are downloaded, use [`SplitGenerator`] to organize each split in the dataset. This is a simple class that contains:

   - The `name` of each split. You should use the standard split names: `Split.TRAIN`, `Split.TEST`, and `Split.VALIDATION`.

   - `gen_kwargs` provides the file paths to the data files to load for each split.

Your `DatasetBuilder._split_generators()` should look like this now:

```py
def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
    urls_to_download = self._URLS
    downloaded_files = dl_manager.download_and_extract(urls_to_download)

    return [
        datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
        datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
    ]
```

## Generate samples

At this point, you have:

- Added the dataset attributes.
- Provided instructions for how to download the data files. - Organized the splits. The next step is to actually generate the samples in each split. 1. `DatasetBuilder._generate_examples` takes the file path provided by `gen_kwargs` to read and parse the data files. You need to write a function that loads the data files and extracts the columns. 2. Your function should yield a tuple of an `id_`, and an example from the dataset. ```py def _generate_examples(self, filepath): """This function returns the examples in the raw (text) form.""" logger.info("generating examples from = %s", filepath) with open(filepath) as f: squad = json.load(f) for article in squad["data"]: title = article.get("title", "").strip() for paragraph in article["paragraphs"]: context = paragraph["context"].strip() for qa in paragraph["qas"]: question = qa["question"].strip() id_ = qa["id"] answer_starts = [answer["answer_start"] for answer in qa["answers"]] answers = [answer["text"].strip() for answer in qa["answers"]] # Features currently used are "context", "question", and "answers". # Others are extracted here for the ease of future expansions. yield id_, { "title": title, "context": context, "question": question, "id": id_, "answers": {"answer_start": answer_starts, "text": answers,}, } ``` ## (Optional) Generate dataset metadata Adding dataset metadata is a great way to include information about your dataset. The metadata is stored in the dataset card `README.md` in YAML. It includes information like the number of examples required to confirm the dataset was correctly generated, and information about the dataset like its `features`. Run the following command to generate your dataset metadata in `README.md` and make sure your new dataset loading script works correctly: ``` datasets-cli test path/to/<your-dataset-loading-script> --save_info --all_configs ``` If your dataset loading script passed the test, you should now have a `README.md` file in your dataset folder containing a `dataset_info` field with some metadata. ## Upload to the Hub Once your script is ready, [create a dataset card](dataset_card) and [upload it to the Hub](share). Congratulations, you can now load your dataset from the Hub! 🥳 ```py >>> from datasets import load_dataset >>> load_dataset("<username>/my_dataset") ``` ## Advanced features ### Sharding If your dataset is made of many big files, 🤗 Datasets automatically runs your script in parallel to make it super fast! It can help if you have hundreds or thousands of TAR archives, or JSONL files like [oscar](https://huggingface.co/datasets/oscar/blob/main/oscar.py) for example. To make it work, we consider lists of files in `gen_kwargs` to be shards. Therefore 🤗 Datasets can automatically spawn several workers to run `_generate_examples` in parallel, and each worker is given a subset of shards to process. ```python class MyShardedDataset(datasets.GeneratorBasedBuilder): def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]: downloaded_files = dl_manager.download([f"data/shard_{i}.jsonl" for i in range(1024)]) return [ datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepaths": downloaded_files}), ] def _generate_examples(self, filepaths): # Each worker can be given a slice of the original `filepaths` list defined in the `gen_kwargs` # so that this code can run in parallel on several shards at the same time for filepath in filepaths: ... 
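# An illustrative sketch of the per-shard work. The JSON Lines format and the
# "id" field are assumptions made for this example; adapt the parsing to your data:
#     with open(filepath, encoding="utf-8") as f:
#         for line in f:
#             example = json.loads(line)
#             yield example["id"], example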
``` Users can also pass `num_proc=` to `load_dataset()` to set the number of processes to use as workers. ### ArrowBasedBuilder For some datasets it can be much faster to yield batches of data rather than examples one by one. You can speed up dataset generation by yielding Arrow tables directly, instead of single examples. This is especially useful if your data comes from Pandas DataFrames for example, since the conversion from Pandas to Arrow is as simple as: ```python import pyarrow as pa pa_table = pa.Table.from_pandas(df) ``` To yield Arrow tables instead of single examples, make your dataset builder inherit from [`ArrowBasedBuilder`] instead of [`GeneratorBasedBuilder`], and use `_generate_tables` instead of `_generate_examples`: ```python class MySuperFastDataset(datasets.ArrowBasedBuilder): def _generate_tables(self, filepaths): idx = 0 for filepath in filepaths: ... yield idx, pa_table idx += 1 ``` Don't forget to keep your script memory efficient, in case users run it on machines with a low amount of RAM.
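For illustration, a minimal Arrow-based builder for sharded CSV files could look like the sketch below. The shard names, the four-shard count, and the use of CSV are assumptions made up for this example, not part of any real dataset script:

```python
import pandas as pd
import pyarrow as pa

import datasets


class MyCsvDataset(datasets.ArrowBasedBuilder):
    def _info(self):
        # Features are left unset here; they can be inferred from the Arrow tables.
        return datasets.DatasetInfo()

    def _split_generators(self, dl_manager):
        # Hypothetical shard names; lists of files in gen_kwargs are treated as shards,
        # so several workers can process them in parallel.
        filepaths = dl_manager.download([f"data/shard_{i}.csv" for i in range(4)])
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepaths": filepaths}),
        ]

    def _generate_tables(self, filepaths):
        for idx, filepath in enumerate(filepaths):
            # One Arrow table per shard keeps peak memory bounded by the shard size.
            yield idx, pa.Table.from_pandas(pd.read_csv(filepath))
```

With a script like this, calling `load_dataset("path/to/script", num_proc=4)` lets 🤗 Datasets spread the shards across four worker processes.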
datasets/docs/source/dataset_script.mdx/0
{ "file_path": "datasets/docs/source/dataset_script.mdx", "repo_id": "datasets", "token_count": 5373 }
81
# Process text data This guide shows specific methods for processing text datasets. Learn how to: - Tokenize a dataset with [`~Dataset.map`]. - Align dataset labels with label ids for NLI datasets. For a guide on how to process any type of dataset, take a look at the <a class="underline decoration-sky-400 decoration-2 font-semibold" href="./process">general process guide</a>. ## Map The [`~Dataset.map`] function supports processing batches of examples at once which speeds up tokenization. Load a tokenizer from 🤗 [Transformers](https://huggingface.co/transformers/): ```py >>> from transformers import AutoTokenizer >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased") ``` Set the `batched` parameter to `True` in the [`~Dataset.map`] function to apply the tokenizer to batches of examples: ```py >>> dataset = dataset.map(lambda examples: tokenizer(examples["text"]), batched=True) >>> dataset[0] {'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .', 'label': 1, 'input_ids': [101, 1996, 2600, 2003, 16036, 2000, 2022, 1996, 7398, 2301, 1005, 1055, 2047, 1000, 16608, 1000, 1998, 2008, 2002, 1005, 1055, 2183, 2000, 2191, 1037, 17624, 2130, 3618, 2084, 7779, 29058, 8625, 13327, 1010, 3744, 1011, 18856, 19513, 3158, 5477, 4168, 2030, 7112, 16562, 2140, 1012, 102], 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]} ``` The [`~Dataset.map`] function converts the returned values to a PyArrow-supported format. But explicitly returning the tensors as NumPy arrays is faster because it is a natively supported PyArrow format. Set `return_tensors="np"` when you tokenize your text: ```py >>> dataset = dataset.map(lambda examples: tokenizer(examples["text"], return_tensors="np"), batched=True) ``` ## Align The [`~Dataset.align_labels_with_mapping`] function aligns a dataset label id with the label name. Not all 🤗 Transformers models follow the prescribed label mapping of the original dataset, especially for NLI datasets. For example, the [MNLI](https://huggingface.co/datasets/glue) dataset uses the following label mapping: ```py >>> label2id = {"entailment": 0, "neutral": 1, "contradiction": 2} ``` To align the dataset label mapping with the mapping used by a model, create a dictionary of the label name and id to align on: ```py >>> label2id = {"contradiction": 0, "neutral": 1, "entailment": 2} ``` Pass the dictionary of the label mappings to the [`~Dataset.align_labels_with_mapping`] function, and the column to align on: ```py >>> from datasets import load_dataset >>> mnli = load_dataset("glue", "mnli", split="train") >>> mnli_aligned = mnli.align_labels_with_mapping(label2id, "label") ``` You can also use this function to assign a custom mapping of labels to ids.
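For instance, assuming a dataset whose `label` column is a `ClassLabel` with the names `"positive"` and `"negative"`, you could reassign the ids like this (an illustrative sketch; the ids are arbitrary):

```py
>>> custom_label2id = {"positive": 0, "negative": 1}
>>> dataset = dataset.align_labels_with_mapping(custom_label2id, "label")
```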
datasets/docs/source/nlp_process.mdx/0
{ "file_path": "datasets/docs/source/nlp_process.mdx", "repo_id": "datasets", "token_count": 1109 }
82
# Share a dataset to the Hub The [Hub](https://huggingface.co/datasets) is home to an extensive collection of community-curated and popular research datasets. We encourage you to share your dataset to the Hub to help grow the ML community and accelerate progress for everyone. All contributions are welcome; adding a dataset is just a drag and drop away! Start by [creating a Hugging Face Hub account](https://huggingface.co/join) if you don't have one yet. ## Upload with the Hub UI The Hub's web-based interface allows users without any developer experience to upload a dataset. ### Create a repository A repository hosts all your dataset files, including the revision history, making storing more than one dataset version possible. 1. Click on your profile and select **New Dataset** to create a new dataset repository. 2. Pick a name for your dataset, and choose whether it is a public or private dataset. A public dataset is visible to anyone, whereas a private dataset can only be viewed by you or members of your organization. <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/create_repo.png"/> </div> ### Upload dataset 1. Once you've created a repository, navigate to the **Files and versions** tab to add a file. Select **Add file** to upload your dataset files. We support many text, audio, and image data extensions such as `.csv`, `.mp3`, and `.jpg` among many others. For text data extensions like `.csv`, `.json`, `.jsonl`, and `.txt`, we recommend compressing them before uploading to the Hub (to `.zip` or `.gz` file extension for example). Text file extensions are not tracked by Git LFS by default, and if they're greater than 10MB, they will not be committed and uploaded. Take a look at the `.gitattributes` file in your repository for a complete list of tracked file extensions. For this tutorial, you can use the following sample `.csv` files since they're small: <a href="https://huggingface.co/datasets/stevhliu/demo/raw/main/train.csv" download>train.csv</a>, <a href="https://huggingface.co/datasets/stevhliu/demo/raw/main/test.csv" download>test.csv</a>. <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/upload_files.png"/> </div> 2. Drag and drop your dataset files and add a brief descriptive commit message. <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/commit_files.png"/> </div> 3. After uploading your dataset files, they are stored in your dataset repository. <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/files_stored.png"/> </div> ### Create a Dataset card Adding a Dataset card is super valuable for helping users find your dataset and understand how to use it responsibly. 1. Click on **Create Dataset Card** to create a Dataset card. This button creates a `README.md` file in your repository. <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/dataset_card.png"/> </div> 2. At the top, you'll see the **Metadata UI** with several fields to select from like license, language, and task categories. These are the most important tags for helping users discover your dataset on the Hub. When you select an option from each field, they'll be automatically added to the top of the dataset card. 
You can also look at the [Dataset Card specifications](https://github.com/huggingface/hub-docs/blob/main/datasetcard.md?plain=1), which has a complete set of (but not required) tag options like `annotations_creators`, to help you choose the appropriate tags. <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/metadata_ui.png"/> </div> 3. Click on the **Import dataset card template** link at the top of the editor to automatically create a dataset card template. Filling out the template is a great way to introduce your dataset to the community and help users understand how to use it. For a detailed example of what a good Dataset card should look like, take a look at the [CNN DailyMail Dataset card](https://huggingface.co/datasets/cnn_dailymail). ### Load dataset Once your dataset is stored on the Hub, anyone can load it with the [`load_dataset`] function: ```py >>> from datasets import load_dataset >>> dataset = load_dataset("stevhliu/demo") ``` ## Upload with Python Users who prefer to upload a dataset programmatically can use the [huggingface_hub](https://huggingface.co/docs/huggingface_hub/index) library. This library allows users to interact with the Hub from Python. 1. Begin by installing the library: ```bash pip install huggingface_hub ``` 2. To upload a dataset on the Hub in Python, you need to log in to your Hugging Face account: ```bash huggingface-cli login ``` 3. Use the [`push_to_hub()`](https://huggingface.co/docs/datasets/main/en/package_reference/main_classes#datasets.DatasetDict.push_to_hub) function to help you add, commit, and push a file to your repository: ```py >>> from datasets import load_dataset >>> dataset = load_dataset("stevhliu/demo") # dataset = dataset.map(...) # do all your processing here >>> dataset.push_to_hub("stevhliu/processed_demo") ``` To set your dataset as private, set the `private` parameter to `True`. This parameter will only work if you are creating a repository for the first time. ```py >>> dataset.push_to_hub("stevhliu/private_processed_demo", private=True) ``` To add a new configuration (or subset) to a dataset or to add a new split (train/validation/test), please refer to the [`Dataset.push_to_hub`] documentation. ### Privacy A private dataset is only accessible by you. Similarly, if you share a dataset within your organization, then members of the organization can also access the dataset. Load a private dataset by providing your authentication token to the `token` parameter: ```py >>> from datasets import load_dataset # Load a private individual dataset >>> dataset = load_dataset("stevhliu/demo", token=True) # Load a private organization dataset >>> dataset = load_dataset("organization/dataset_name", token=True) ``` ## What's next? Congratulations, you've completed the tutorials! 🥳 From here, you can go on to: - Learn more about how to use 🤗 Datasets other functions to [process your dataset](process). - [Stream large datasets](stream) without downloading it locally. - [Define your dataset splits and configurations](repository_structure) and share your dataset with the community. If you have any questions about 🤗 Datasets, feel free to join and ask the community on our [forum](https://discuss.huggingface.co/c/datasets/10).
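As a final example of the `push_to_hub` workflow mentioned above, recent versions of 🤗 Datasets let you push several configurations (subsets) to the same repository and load them back by name. The repository and configuration names below are placeholders:

```py
>>> english_dataset.push_to_hub("<username>/my_dataset", config_name="en")
>>> french_dataset.push_to_hub("<username>/my_dataset", config_name="fr")
# Later, load a single configuration by name
>>> dataset = load_dataset("<username>/my_dataset", "fr")
```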
datasets/docs/source/upload_dataset.mdx/0
{ "file_path": "datasets/docs/source/upload_dataset.mdx", "repo_id": "datasets", "token_count": 1999 }
83
from abc import ABC, abstractmethod from argparse import ArgumentParser class BaseDatasetsCLICommand(ABC): @staticmethod @abstractmethod def register_subcommand(parser: ArgumentParser): raise NotImplementedError() @abstractmethod def run(self): raise NotImplementedError()
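A hypothetical subclass (not an actual command shipped with the library) would implement the two abstract methods along these lines. In practice the `parser` argument is the sub-parsers object returned by `ArgumentParser.add_subparsers()`, which is why `add_parser` is called on it:

```python
from argparse import ArgumentParser

from datasets.commands import BaseDatasetsCLICommand


class DummyCommand(BaseDatasetsCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        # Register a "dummy" sub-command and bind the parsed args to a factory
        dummy_parser = parser.add_parser("dummy")
        dummy_parser.add_argument("--name", type=str, default="world")
        dummy_parser.set_defaults(func=lambda args: DummyCommand(args.name))

    def __init__(self, name: str):
        self._name = name

    def run(self):
        print(f"Hello, {self._name}!")
```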
datasets/src/datasets/commands/__init__.py/0
{ "file_path": "datasets/src/datasets/commands/__init__.py", "repo_id": "datasets", "token_count": 107 }
84
__all__ = [ "Audio", "Array2D", "Array3D", "Array4D", "Array5D", "ClassLabel", "Features", "LargeList", "Sequence", "Value", "Image", "Translation", "TranslationVariableLanguages", ] from .audio import Audio from .features import Array2D, Array3D, Array4D, Array5D, ClassLabel, Features, LargeList, Sequence, Value from .image import Image from .translation import Translation, TranslationVariableLanguages
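As a quick illustration of these exports (a generic sketch, not tied to any particular dataset), a feature schema can be declared like this:

```python
from datasets import ClassLabel, Features, Sequence, Value

features = Features(
    {
        "text": Value("string"),
        "label": ClassLabel(names=["negative", "positive"]),
        "token_ids": Sequence(Value("int32")),
    }
)
```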
datasets/src/datasets/features/__init__.py/0
{ "file_path": "datasets/src/datasets/features/__init__.py", "repo_id": "datasets", "token_count": 168 }
85
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 """DatasetInfo record information we know about a dataset. This includes things that we know about the dataset statically, i.e.: - description - canonical location - does it have validation and tests splits - size - etc. This also includes the things that can and should be computed once we've processed the dataset as well: - number of examples (in each split) - etc. """ import copy import dataclasses import json import os import posixpath from dataclasses import dataclass from pathlib import Path from typing import ClassVar, Dict, List, Optional, Union import fsspec from fsspec.core import url_to_fs from huggingface_hub import DatasetCard, DatasetCardData from . import config from .features import Features from .splits import SplitDict from .utils import Version from .utils.logging import get_logger from .utils.py_utils import asdict, unique_values logger = get_logger(__name__) @dataclass class SupervisedKeysData: input: str = "" output: str = "" @dataclass class DownloadChecksumsEntryData: key: str = "" value: str = "" class MissingCachedSizesConfigError(Exception): """The expected cached sizes of the download file are missing.""" class NonMatchingCachedSizesError(Exception): """The prepared split doesn't have expected sizes.""" @dataclass class PostProcessedInfo: features: Optional[Features] = None resources_checksums: Optional[dict] = None def __post_init__(self): # Convert back to the correct classes when we reload from dict if self.features is not None and not isinstance(self.features, Features): self.features = Features.from_dict(self.features) @classmethod def from_dict(cls, post_processed_info_dict: dict) -> "PostProcessedInfo": field_names = {f.name for f in dataclasses.fields(cls)} return cls(**{k: v for k, v in post_processed_info_dict.items() if k in field_names}) @dataclass class DatasetInfo: """Information about a dataset. `DatasetInfo` documents datasets, including its name, version, and features. See the constructor arguments and properties for a full list. Not all fields are known on construction and may be updated later. Attributes: description (`str`): A description of the dataset. citation (`str`): A BibTeX citation of the dataset. homepage (`str`): A URL to the official homepage for the dataset. license (`str`): The dataset's license. It can be the name of the license or a paragraph containing the terms of the license. features ([`Features`], *optional*): The features used to specify the dataset's column types. post_processed (`PostProcessedInfo`, *optional*): Information regarding the resources of a possible post-processing of a dataset. For example, it can contain the information of an index. supervised_keys (`SupervisedKeysData`, *optional*): Specifies the input feature and the label for supervised learning if applicable for the dataset (legacy from TFDS). 
builder_name (`str`, *optional*): The name of the `GeneratorBasedBuilder` subclass used to create the dataset. Usually matched to the corresponding script name. It is also the snake_case version of the dataset builder class name. config_name (`str`, *optional*): The name of the configuration derived from [`BuilderConfig`]. version (`str` or [`Version`], *optional*): The version of the dataset. splits (`dict`, *optional*): The mapping between split name and metadata. download_checksums (`dict`, *optional*): The mapping between the URL to download the dataset's checksums and corresponding metadata. download_size (`int`, *optional*): The size of the files to download to generate the dataset, in bytes. post_processing_size (`int`, *optional*): Size of the dataset in bytes after post-processing, if any. dataset_size (`int`, *optional*): The combined size in bytes of the Arrow tables for all splits. size_in_bytes (`int`, *optional*): The combined size in bytes of all files associated with the dataset (downloaded files + Arrow files). **config_kwargs (additional keyword arguments): Keyword arguments to be passed to the [`BuilderConfig`] and used in the [`DatasetBuilder`]. """ # Set in the dataset scripts description: str = dataclasses.field(default_factory=str) citation: str = dataclasses.field(default_factory=str) homepage: str = dataclasses.field(default_factory=str) license: str = dataclasses.field(default_factory=str) features: Optional[Features] = None post_processed: Optional[PostProcessedInfo] = None supervised_keys: Optional[SupervisedKeysData] = None # Set later by the builder builder_name: Optional[str] = None dataset_name: Optional[str] = None # for packaged builders, to be different from builder_name config_name: Optional[str] = None version: Optional[Union[str, Version]] = None # Set later by `download_and_prepare` splits: Optional[dict] = None download_checksums: Optional[dict] = None download_size: Optional[int] = None post_processing_size: Optional[int] = None dataset_size: Optional[int] = None size_in_bytes: Optional[int] = None _INCLUDED_INFO_IN_YAML: ClassVar[List[str]] = [ "config_name", "download_size", "dataset_size", "features", "splits", ] def __post_init__(self): # Convert back to the correct classes when we reload from dict if self.features is not None and not isinstance(self.features, Features): self.features = Features.from_dict(self.features) if self.post_processed is not None and not isinstance(self.post_processed, PostProcessedInfo): self.post_processed = PostProcessedInfo.from_dict(self.post_processed) if self.version is not None and not isinstance(self.version, Version): if isinstance(self.version, str): self.version = Version(self.version) else: self.version = Version.from_dict(self.version) if self.splits is not None and not isinstance(self.splits, SplitDict): self.splits = SplitDict.from_split_dict(self.splits) if self.supervised_keys is not None and not isinstance(self.supervised_keys, SupervisedKeysData): if isinstance(self.supervised_keys, (tuple, list)): self.supervised_keys = SupervisedKeysData(*self.supervised_keys) else: self.supervised_keys = SupervisedKeysData(**self.supervised_keys) def write_to_directory(self, dataset_info_dir, pretty_print=False, storage_options: Optional[dict] = None): """Write `DatasetInfo` and license (if present) as JSON files to `dataset_info_dir`. Args: dataset_info_dir (`str`): Destination directory. pretty_print (`bool`, defaults to `False`): If `True`, the JSON will be pretty-printed with the indent level of 4. 
storage_options (`dict`, *optional*): Key/value pairs to be passed on to the file-system backend, if any. <Added version="2.9.0"/> Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="validation") >>> ds.info.write_to_directory("/path/to/directory/") ``` """ fs: fsspec.AbstractFileSystem fs, *_ = url_to_fs(dataset_info_dir, **(storage_options or {})) with fs.open(posixpath.join(dataset_info_dir, config.DATASET_INFO_FILENAME), "wb") as f: self._dump_info(f, pretty_print=pretty_print) if self.license: with fs.open(posixpath.join(dataset_info_dir, config.LICENSE_FILENAME), "wb") as f: self._dump_license(f) def _dump_info(self, file, pretty_print=False): """Dump info in `file` file-like object open in bytes mode (to support remote files)""" file.write(json.dumps(asdict(self), indent=4 if pretty_print else None).encode("utf-8")) def _dump_license(self, file): """Dump license in `file` file-like object open in bytes mode (to support remote files)""" file.write(self.license.encode("utf-8")) @classmethod def from_merge(cls, dataset_infos: List["DatasetInfo"]): dataset_infos = [dset_info.copy() for dset_info in dataset_infos if dset_info is not None] if len(dataset_infos) > 0 and all(dataset_infos[0] == dset_info for dset_info in dataset_infos): # if all dataset_infos are equal we don't need to merge. Just return the first. return dataset_infos[0] description = "\n\n".join(unique_values(info.description for info in dataset_infos)).strip() citation = "\n\n".join(unique_values(info.citation for info in dataset_infos)).strip() homepage = "\n\n".join(unique_values(info.homepage for info in dataset_infos)).strip() license = "\n\n".join(unique_values(info.license for info in dataset_infos)).strip() features = None supervised_keys = None return cls( description=description, citation=citation, homepage=homepage, license=license, features=features, supervised_keys=supervised_keys, ) @classmethod def from_directory(cls, dataset_info_dir: str, storage_options: Optional[dict] = None) -> "DatasetInfo": """Create [`DatasetInfo`] from the JSON file in `dataset_info_dir`. This function updates all the dynamically generated fields (num_examples, hash, time of creation,...) of the [`DatasetInfo`]. This will overwrite all previous metadata. Args: dataset_info_dir (`str`): The directory containing the metadata file. This should be the root directory of a specific dataset version. storage_options (`dict`, *optional*): Key/value pairs to be passed on to the file-system backend, if any. 
<Added version="2.9.0"/> Example: ```py >>> from datasets import DatasetInfo >>> ds_info = DatasetInfo.from_directory("/path/to/directory/") ``` """ fs: fsspec.AbstractFileSystem fs, *_ = url_to_fs(dataset_info_dir, **(storage_options or {})) logger.info(f"Loading Dataset info from {dataset_info_dir}") if not dataset_info_dir: raise ValueError("Calling DatasetInfo.from_directory() with undefined dataset_info_dir.") with fs.open(posixpath.join(dataset_info_dir, config.DATASET_INFO_FILENAME), "r", encoding="utf-8") as f: dataset_info_dict = json.load(f) return cls.from_dict(dataset_info_dict) @classmethod def from_dict(cls, dataset_info_dict: dict) -> "DatasetInfo": field_names = {f.name for f in dataclasses.fields(cls)} return cls(**{k: v for k, v in dataset_info_dict.items() if k in field_names}) def update(self, other_dataset_info: "DatasetInfo", ignore_none=True): self_dict = self.__dict__ self_dict.update( **{ k: copy.deepcopy(v) for k, v in other_dataset_info.__dict__.items() if (v is not None or not ignore_none) } ) def copy(self) -> "DatasetInfo": return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()}) def _to_yaml_dict(self) -> dict: yaml_dict = {} dataset_info_dict = asdict(self) for key in dataset_info_dict: if key in self._INCLUDED_INFO_IN_YAML: value = getattr(self, key) if hasattr(value, "_to_yaml_list"): # Features, SplitDict yaml_dict[key] = value._to_yaml_list() elif hasattr(value, "_to_yaml_string"): # Version yaml_dict[key] = value._to_yaml_string() else: yaml_dict[key] = value return yaml_dict @classmethod def _from_yaml_dict(cls, yaml_data: dict) -> "DatasetInfo": yaml_data = copy.deepcopy(yaml_data) if yaml_data.get("features") is not None: yaml_data["features"] = Features._from_yaml_list(yaml_data["features"]) if yaml_data.get("splits") is not None: yaml_data["splits"] = SplitDict._from_yaml_list(yaml_data["splits"]) field_names = {f.name for f in dataclasses.fields(cls)} return cls(**{k: v for k, v in yaml_data.items() if k in field_names}) class DatasetInfosDict(Dict[str, DatasetInfo]): def write_to_directory(self, dataset_infos_dir, overwrite=False, pretty_print=False) -> None: total_dataset_infos = {} dataset_infos_path = os.path.join(dataset_infos_dir, config.DATASETDICT_INFOS_FILENAME) dataset_readme_path = os.path.join(dataset_infos_dir, config.REPOCARD_FILENAME) if not overwrite: total_dataset_infos = self.from_directory(dataset_infos_dir) total_dataset_infos.update(self) if os.path.exists(dataset_infos_path): # for backward compatibility, let's update the JSON file if it exists with open(dataset_infos_path, "w", encoding="utf-8") as f: dataset_infos_dict = { config_name: asdict(dset_info) for config_name, dset_info in total_dataset_infos.items() } json.dump(dataset_infos_dict, f, indent=4 if pretty_print else None) # Dump the infos in the YAML part of the README.md file if os.path.exists(dataset_readme_path): dataset_card = DatasetCard.load(dataset_readme_path) dataset_card_data = dataset_card.data else: dataset_card = None dataset_card_data = DatasetCardData() if total_dataset_infos: total_dataset_infos.to_dataset_card_data(dataset_card_data) dataset_card = ( DatasetCard("---\n" + str(dataset_card_data) + "\n---\n") if dataset_card is None else dataset_card ) dataset_card.save(Path(dataset_readme_path)) @classmethod def from_directory(cls, dataset_infos_dir) -> "DatasetInfosDict": logger.info(f"Loading Dataset Infos from {dataset_infos_dir}") # Load the info from the YAML part of README.md if 
os.path.exists(os.path.join(dataset_infos_dir, config.REPOCARD_FILENAME)): dataset_card_data = DatasetCard.load(Path(dataset_infos_dir) / config.REPOCARD_FILENAME).data if "dataset_info" in dataset_card_data: return cls.from_dataset_card_data(dataset_card_data) if os.path.exists(os.path.join(dataset_infos_dir, config.DATASETDICT_INFOS_FILENAME)): # this is just to have backward compatibility with dataset_infos.json files with open(os.path.join(dataset_infos_dir, config.DATASETDICT_INFOS_FILENAME), encoding="utf-8") as f: return cls( { config_name: DatasetInfo.from_dict(dataset_info_dict) for config_name, dataset_info_dict in json.load(f).items() } ) else: return cls() @classmethod def from_dataset_card_data(cls, dataset_card_data: DatasetCardData) -> "DatasetInfosDict": if isinstance(dataset_card_data.get("dataset_info"), (list, dict)): if isinstance(dataset_card_data["dataset_info"], list): return cls( { dataset_info_yaml_dict.get("config_name", "default"): DatasetInfo._from_yaml_dict( dataset_info_yaml_dict ) for dataset_info_yaml_dict in dataset_card_data["dataset_info"] } ) else: dataset_info = DatasetInfo._from_yaml_dict(dataset_card_data["dataset_info"]) dataset_info.config_name = dataset_card_data["dataset_info"].get("config_name", "default") return cls({dataset_info.config_name: dataset_info}) else: return cls() def to_dataset_card_data(self, dataset_card_data: DatasetCardData) -> None: if self: # first get existing metadata info if "dataset_info" in dataset_card_data and isinstance(dataset_card_data["dataset_info"], dict): dataset_metadata_infos = { dataset_card_data["dataset_info"].get("config_name", "default"): dataset_card_data["dataset_info"] } elif "dataset_info" in dataset_card_data and isinstance(dataset_card_data["dataset_info"], list): dataset_metadata_infos = { config_metadata["config_name"]: config_metadata for config_metadata in dataset_card_data["dataset_info"] } else: dataset_metadata_infos = {} # update/rewrite existing metadata info with the one to dump total_dataset_infos = { **dataset_metadata_infos, **{config_name: dset_info._to_yaml_dict() for config_name, dset_info in self.items()}, } # the config_name from the dataset_infos_dict takes over the config_name of the DatasetInfo for config_name, dset_info_yaml_dict in total_dataset_infos.items(): dset_info_yaml_dict["config_name"] = config_name if len(total_dataset_infos) == 1: # use a struct instead of a list of configurations, since there's only one dataset_card_data["dataset_info"] = next(iter(total_dataset_infos.values())) config_name = dataset_card_data["dataset_info"].pop("config_name", None) if config_name != "default": # if config_name is not "default" preserve it and put at the first position dataset_card_data["dataset_info"] = { "config_name": config_name, **dataset_card_data["dataset_info"], } else: dataset_card_data["dataset_info"] = [] for config_name, dataset_info_yaml_dict in sorted(total_dataset_infos.items()): # add the config_name field in first position dataset_info_yaml_dict.pop("config_name", None) dataset_info_yaml_dict = {"config_name": config_name, **dataset_info_yaml_dict} dataset_card_data["dataset_info"].append(dataset_info_yaml_dict)
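As a usage sketch (the local path below is a placeholder), a dataset's metadata can be written out and reloaded like this:

```python
from datasets import DatasetInfo, load_dataset

ds = load_dataset("rotten_tomatoes", split="validation")
ds.info.write_to_directory("/path/to/directory/", pretty_print=True)

# Later, rebuild the DatasetInfo from the saved JSON file
info = DatasetInfo.from_directory("/path/to/directory/")
print(info.dataset_size, info.features)
```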
datasets/src/datasets/info.py/0
{ "file_path": "datasets/src/datasets/info.py", "repo_id": "datasets", "token_count": 8410 }
86
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 """Splits related API.""" import abc import collections import copy import dataclasses import re from dataclasses import dataclass from typing import Dict, List, Optional, Union from .arrow_reader import FileInstructions, make_file_instructions from .naming import _split_re from .utils.py_utils import NonMutableDict, asdict @dataclass class SplitInfo: name: str = dataclasses.field(default="", metadata={"include_in_asdict_even_if_is_default": True}) num_bytes: int = dataclasses.field(default=0, metadata={"include_in_asdict_even_if_is_default": True}) num_examples: int = dataclasses.field(default=0, metadata={"include_in_asdict_even_if_is_default": True}) shard_lengths: Optional[List[int]] = None # Deprecated # For backward compatibility, this field needs to always be included in files like # dataset_infos.json and dataset_info.json files # To do so, we always include it in the output of datasets.utils.py_utils.asdict(split_info) dataset_name: Optional[str] = dataclasses.field( default=None, metadata={"include_in_asdict_even_if_is_default": True} ) @property def file_instructions(self): """Returns the list of dict(filename, take, skip).""" # `self.dataset_name` is assigned in `SplitDict.add()`. instructions = make_file_instructions( name=self.dataset_name, split_infos=[self], instruction=str(self.name), ) return instructions.file_instructions @dataclass class SubSplitInfo: """Wrapper around a sub split info. This class expose info on the subsplit: ``` ds, info = datasets.load_dataset(..., split='train[75%:]', with_info=True) info.splits['train[75%:]'].num_examples ``` """ instructions: FileInstructions @property def num_examples(self): """Returns the number of example in the subsplit.""" return self.instructions.num_examples @property def file_instructions(self): """Returns the list of dict(filename, take, skip).""" return self.instructions.file_instructions class SplitBase(metaclass=abc.ABCMeta): # pylint: disable=line-too-long """Abstract base class for Split compositionality. See the [guide on splits](../loading#slice-splits) for more information. There are three parts to the composition: 1) The splits are composed (defined, merged, split,...) together before calling the `.as_dataset()` function. This is done with the `__add__`, `__getitem__`, which return a tree of `SplitBase` (whose leaf are the `NamedSplit` objects) ``` split = datasets.Split.TRAIN + datasets.Split.TEST.subsplit(datasets.percent[:50]) ``` 2) The `SplitBase` is forwarded to the `.as_dataset()` function to be resolved into actual read instruction. This is done by the `.get_read_instruction()` method which takes the real dataset splits (name, number of shards,...) 
and parse the tree to return a `SplitReadInstruction()` object ``` read_instruction = split.get_read_instruction(self.info.splits) ``` 3) The `SplitReadInstruction` is then used in the `tf.data.Dataset` pipeline to define which files to read and how to skip examples within file. """ # pylint: enable=line-too-long @abc.abstractmethod def get_read_instruction(self, split_dict): """Parse the descriptor tree and compile all read instructions together. Args: split_dict: `dict`, The `dict[split_name, SplitInfo]` of the dataset Returns: split_read_instruction: `SplitReadInstruction` """ raise NotImplementedError("Abstract method") def __eq__(self, other): """Equality: datasets.Split.TRAIN == 'train'.""" if isinstance(other, (NamedSplit, str)): return False raise NotImplementedError("Equality is not implemented between merged/sub splits.") def __ne__(self, other): """InEquality: datasets.Split.TRAIN != 'test'.""" return not self.__eq__(other) def __add__(self, other): """Merging: datasets.Split.TRAIN + datasets.Split.TEST.""" return _SplitMerged(self, other) def subsplit(self, arg=None, k=None, percent=None, weighted=None): # pylint: disable=redefined-outer-name """Divides this split into subsplits. There are 3 ways to define subsplits, which correspond to the 3 arguments `k` (get `k` even subsplits), `percent` (get a slice of the dataset with `datasets.percent`), and `weighted` (get subsplits with proportions specified by `weighted`). Example:: ``` # 50% train, 50% test train, test = split.subsplit(k=2) # 50% train, 25% test, 25% validation train, test, validation = split.subsplit(weighted=[2, 1, 1]) # Extract last 20% subsplit = split.subsplit(datasets.percent[-20:]) ``` Warning: k and weighted will be converted into percent which mean that values below the percent will be rounded up or down. The final split may be bigger to deal with remainders. For instance: ``` train, test, valid = split.subsplit(k=3) # 33%, 33%, 34% s1, s2, s3, s4 = split.subsplit(weighted=[2, 2, 1, 1]) # 33%, 33%, 16%, 18% ``` Args: arg: If no kwargs are given, `arg` will be interpreted as one of `k`, `percent`, or `weighted` depending on the type. For example: ``` split.subsplit(10) # Equivalent to split.subsplit(k=10) split.subsplit(datasets.percent[:-20]) # percent=datasets.percent[:-20] split.subsplit([1, 1, 2]) # weighted=[1, 1, 2] ``` k: `int` If set, subdivide the split into `k` equal parts. percent: `datasets.percent slice`, return a single subsplit corresponding to a slice of the original split. For example: `split.subsplit(datasets.percent[-20:]) # Last 20% of the dataset`. weighted: `list[int]`, return a list of subsplits whose proportions match the normalized sum of the list. For example: `split.subsplit(weighted=[1, 1, 2]) # 25%, 25%, 50%`. Returns: A subsplit or list of subsplits extracted from this split object. """ # Note that the percent kwargs redefine the outer name datasets.percent. This # is done for consistency (.subsplit(percent=datasets.percent[:40])) if sum(bool(x) for x in (arg, k, percent, weighted)) != 1: raise ValueError("Only one argument of subsplit should be set.") # Auto deduce k if isinstance(arg, int): k = arg elif isinstance(arg, slice): percent = arg elif isinstance(arg, list): weighted = arg if not (k or percent or weighted): raise ValueError( f"Invalid split argument {arg}. Only list, slice and int supported. " "One of k, weighted or percent should be set to a non empty value." ) def assert_slices_coverage(slices): # Ensure that the expended slices cover all percents. 
assert sum((list(range(*s.indices(100))) for s in slices), []) == list(range(100)) if k: if not 0 < k <= 100: raise ValueError(f"Subsplit k should be between 0 and 100, got {k}") shift = 100 // k slices = [slice(i * shift, (i + 1) * shift) for i in range(k)] # Round up last element to ensure all elements are taken slices[-1] = slice(slices[-1].start, 100) # Internal check to ensure full coverage assert_slices_coverage(slices) return tuple(_SubSplit(self, s) for s in slices) elif percent: return _SubSplit(self, percent) elif weighted: # Normalize the weighted sum total = sum(weighted) weighted = [100 * x // total for x in weighted] # Create the slice for each of the elements start = 0 stop = 0 slices = [] for v in weighted: stop += v slices.append(slice(start, stop)) start = stop # Round up last element to ensure all elements are taken slices[-1] = slice(slices[-1].start, 100) # Internal check to ensure full coverage assert_slices_coverage(slices) return tuple(_SubSplit(self, s) for s in slices) else: # Should not be possible raise ValueError("Could not determine the split") # 2 requirements: # 1. datasets.percent be sliceable # 2. datasets.percent be documented # # Instances are not documented, so we want datasets.percent to be a class, but to # have it be sliceable, we need this metaclass. class PercentSliceMeta(type): def __getitem__(cls, slice_value): if not isinstance(slice_value, slice): raise ValueError(f"datasets.percent should only be called with slice, not {slice_value}") return slice_value class PercentSlice(metaclass=PercentSliceMeta): # pylint: disable=line-too-long """Syntactic sugar for defining slice subsplits: `datasets.percent[75:-5]`. See the [guide on splits](../loading#slice-splits) for more information. """ # pylint: enable=line-too-long pass percent = PercentSlice # pylint: disable=invalid-name class _SplitMerged(SplitBase): """Represent two split descriptors merged together.""" def __init__(self, split1, split2): self._split1 = split1 self._split2 = split2 def get_read_instruction(self, split_dict): read_instruction1 = self._split1.get_read_instruction(split_dict) read_instruction2 = self._split2.get_read_instruction(split_dict) return read_instruction1 + read_instruction2 def __repr__(self): return f"({repr(self._split1)} + {repr(self._split2)})" class _SubSplit(SplitBase): """Represent a sub split of a split descriptor.""" def __init__(self, split, slice_value): self._split = split self._slice_value = slice_value def get_read_instruction(self, split_dict): return self._split.get_read_instruction(split_dict)[self._slice_value] def __repr__(self): slice_str = "{start}:{stop}" if self._slice_value.step is not None: slice_str += ":{step}" slice_str = slice_str.format( start="" if self._slice_value.start is None else self._slice_value.start, stop="" if self._slice_value.stop is None else self._slice_value.stop, step=self._slice_value.step, ) return f"{repr(self._split)}(datasets.percent[{slice_str}])" class NamedSplit(SplitBase): """Descriptor corresponding to a named split (train, test, ...). Example: Each descriptor can be composed with other using addition or slice: ```py split = datasets.Split.TRAIN.subsplit(datasets.percent[0:25]) + datasets.Split.TEST ``` The resulting split will correspond to 25% of the train split merged with 100% of the test split. 
A split cannot be added twice, so the following will fail: ```py split = ( datasets.Split.TRAIN.subsplit(datasets.percent[:25]) + datasets.Split.TRAIN.subsplit(datasets.percent[75:]) ) # Error split = datasets.Split.TEST + datasets.Split.ALL # Error ``` The slices can be applied only one time. So the following are valid: ```py split = ( datasets.Split.TRAIN.subsplit(datasets.percent[:25]) + datasets.Split.TEST.subsplit(datasets.percent[:50]) ) split = (datasets.Split.TRAIN + datasets.Split.TEST).subsplit(datasets.percent[:50]) ``` But this is not valid: ```py train = datasets.Split.TRAIN test = datasets.Split.TEST split = train.subsplit(datasets.percent[:25]).subsplit(datasets.percent[:25]) split = (train.subsplit(datasets.percent[:25]) + test).subsplit(datasets.percent[:50]) ``` """ def __init__(self, name): self._name = name split_names_from_instruction = [split_instruction.split("[")[0] for split_instruction in name.split("+")] for split_name in split_names_from_instruction: if not re.match(_split_re, split_name): raise ValueError(f"Split name should match '{_split_re}' but got '{split_name}'.") def __str__(self): return self._name def __repr__(self): return f"NamedSplit({self._name!r})" def __eq__(self, other): """Equality: datasets.Split.TRAIN == 'train'.""" if isinstance(other, NamedSplit): return self._name == other._name # pylint: disable=protected-access elif isinstance(other, SplitBase): return False elif isinstance(other, str): # Other should be string return self._name == other else: return False def __lt__(self, other): return self._name < other._name # pylint: disable=protected-access def __hash__(self): return hash(self._name) def get_read_instruction(self, split_dict): return SplitReadInstruction(split_dict[self._name]) class NamedSplitAll(NamedSplit): """Split corresponding to the union of all defined dataset splits.""" def __init__(self): super().__init__("all") def __repr__(self): return "NamedSplitAll()" def get_read_instruction(self, split_dict): # Merge all dataset split together read_instructions = [SplitReadInstruction(s) for s in split_dict.values()] return sum(read_instructions, SplitReadInstruction()) class Split: # pylint: disable=line-too-long """`Enum` for dataset splits. Datasets are typically split into different subsets to be used at various stages of training and evaluation. - `TRAIN`: the training data. - `VALIDATION`: the validation data. If present, this is typically used as evaluation data while iterating on a model (e.g. changing hyperparameters, model architecture, etc.). - `TEST`: the testing data. This is the data to report metrics on. Typically you do not want to use this during model iteration as you may overfit to it. - `ALL`: the union of all defined dataset splits. All splits, including compositions inherit from `datasets.SplitBase`. See the [guide](../load_hub#splits) on splits for more information. Example: ```py >>> datasets.SplitGenerator( ... name=datasets.Split.TRAIN, ... gen_kwargs={"split_key": "train", "files": dl_manager.download_and extract(url)}, ... ), ... datasets.SplitGenerator( ... name=datasets.Split.VALIDATION, ... gen_kwargs={"split_key": "validation", "files": dl_manager.download_and extract(url)}, ... ), ... datasets.SplitGenerator( ... name=datasets.Split.TEST, ... gen_kwargs={"split_key": "test", "files": dl_manager.download_and extract(url)}, ... 
) ``` """ # pylint: enable=line-too-long TRAIN = NamedSplit("train") TEST = NamedSplit("test") VALIDATION = NamedSplit("validation") ALL = NamedSplitAll() def __new__(cls, name): """Create a custom split with datasets.Split('custom_name').""" return NamedSplitAll() if name == "all" else NamedSplit(name) # Similar to SplitInfo, but contain an additional slice info SlicedSplitInfo = collections.namedtuple( "SlicedSplitInfo", [ "split_info", "slice_value", ], ) # noqa: E231 class SplitReadInstruction: """Object containing the reading instruction for the dataset. Similarly to `SplitDescriptor` nodes, this object can be composed with itself, but the resolution happens instantaneously, instead of keeping track of the tree, such as all instructions are compiled and flattened in a single SplitReadInstruction object containing the list of files and slice to use. Once resolved, the instructions can be accessed with: ``` read_instructions.get_list_sliced_split_info() # List of splits to use ``` """ def __init__(self, split_info=None): self._splits = NonMutableDict(error_msg="Overlap between splits. Split {key} has been added with " "itself.") if split_info: self.add(SlicedSplitInfo(split_info=split_info, slice_value=None)) def add(self, sliced_split): """Add a SlicedSplitInfo the read instructions.""" # TODO(epot): Check that the number of examples per shard % 100 == 0 # Otherwise the slices value may be unbalanced and not exactly reflect the # requested slice. self._splits[sliced_split.split_info.name] = sliced_split def __add__(self, other): """Merging split together.""" # Will raise error if a split has already be added (NonMutableDict) # TODO(epot): If a split is already added but there is no overlap between # the slices, should merge the slices (ex: [:10] + [80:]) split_instruction = SplitReadInstruction() split_instruction._splits.update(self._splits) # pylint: disable=protected-access split_instruction._splits.update(other._splits) # pylint: disable=protected-access return split_instruction def __getitem__(self, slice_value): """Sub-splits.""" # Will raise an error if a split has already been sliced split_instruction = SplitReadInstruction() for v in self._splits.values(): if v.slice_value is not None: raise ValueError(f"Trying to slice Split {v.split_info.name} which has already been sliced") v = v._asdict() v["slice_value"] = slice_value split_instruction.add(SlicedSplitInfo(**v)) return split_instruction def get_list_sliced_split_info(self): return list(self._splits.values()) class SplitDict(dict): """Split info object.""" def __init__(self, *args, dataset_name=None, **kwargs): super().__init__(*args, **kwargs) self.dataset_name = dataset_name def __getitem__(self, key: Union[SplitBase, str]): # 1st case: The key exists: `info.splits['train']` if str(key) in self: return super().__getitem__(str(key)) # 2nd case: Uses instructions: `info.splits['train[50%]']` else: instructions = make_file_instructions( name=self.dataset_name, split_infos=self.values(), instruction=key, ) return SubSplitInfo(instructions) def __setitem__(self, key: Union[SplitBase, str], value: SplitInfo): if key != value.name: raise ValueError(f"Cannot add elem. 
(key mismatch: '{key}' != '{value.name}')") super().__setitem__(key, value) def add(self, split_info: SplitInfo): """Add the split info.""" if split_info.name in self: raise ValueError(f"Split {split_info.name} already present") split_info.dataset_name = self.dataset_name super().__setitem__(split_info.name, split_info) @property def total_num_examples(self): """Return the total number of examples.""" return sum(s.num_examples for s in self.values()) @classmethod def from_split_dict(cls, split_infos: Union[List, Dict], dataset_name: Optional[str] = None): """Returns a new SplitDict initialized from a Dict or List of `split_infos`.""" if isinstance(split_infos, dict): split_infos = list(split_infos.values()) if dataset_name is None: dataset_name = split_infos[0].get("dataset_name") if split_infos else None split_dict = cls(dataset_name=dataset_name) for split_info in split_infos: if isinstance(split_info, dict): split_info = SplitInfo(**split_info) split_dict.add(split_info) return split_dict def to_split_dict(self): """Returns a list of SplitInfo protos that we have.""" out = [] for split_name, split_info in self.items(): split_info = copy.deepcopy(split_info) split_info.name = split_name out.append(split_info) return out def copy(self): return SplitDict.from_split_dict(self.to_split_dict(), self.dataset_name) def _to_yaml_list(self) -> list: out = [asdict(s) for s in self.to_split_dict()] # we don't need the shard lengths in YAML, since it depends on max_shard_size and num_proc for split_info_dict in out: split_info_dict.pop("shard_lengths", None) # we don't need the dataset_name attribute that is deprecated for split_info_dict in out: split_info_dict.pop("dataset_name", None) return out @classmethod def _from_yaml_list(cls, yaml_data: list) -> "SplitDict": return cls.from_split_dict(yaml_data) @dataclass class SplitGenerator: """Defines the split information for the generator. This should be used as returned value of `GeneratorBasedBuilder._split_generators`. See `GeneratorBasedBuilder._split_generators` for more info and example of usage. Args: name (`str`): Name of the `Split` for which the generator will create the examples. **gen_kwargs (additional keyword arguments): Keyword arguments to forward to the `DatasetBuilder._generate_examples` method of the builder. Example: ```py >>> datasets.SplitGenerator( ... name=datasets.Split.TRAIN, ... gen_kwargs={"split_key": "train", "files": dl_manager.download_and_extract(url)}, ... ) ``` """ name: str gen_kwargs: Dict = dataclasses.field(default_factory=dict) split_info: SplitInfo = dataclasses.field(init=False) def __post_init__(self): self.name = str(self.name) # Make sure we convert NamedSplits in strings NamedSplit(self.name) # check that it's a valid split name self.split_info = SplitInfo(name=self.name)
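As an illustrative sketch of how these classes fit together (the split sizes are made up):

```python
from datasets.splits import SplitDict, SplitInfo

splits = SplitDict(dataset_name="my_dataset")
splits.add(SplitInfo(name="train", num_examples=9_000, num_bytes=1_200_000))
splits.add(SplitInfo(name="test", num_examples=1_000, num_bytes=140_000))

print(splits.total_num_examples)     # 10000
print(splits["train"].num_examples)  # 9000
```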
datasets/src/datasets/splits.py/0
{ "file_path": "datasets/src/datasets/splits.py", "repo_id": "datasets", "token_count": 9580 }
87
import re import textwrap from collections import Counter from itertools import groupby from operator import itemgetter from typing import Any, ClassVar, Dict, List, Optional, Tuple import yaml from huggingface_hub import DatasetCardData from ..config import METADATA_CONFIGS_FIELD from ..info import DatasetInfo, DatasetInfosDict from ..naming import _split_re from ..utils.logging import get_logger logger = get_logger(__name__) class _NoDuplicateSafeLoader(yaml.SafeLoader): def _check_no_duplicates_on_constructed_node(self, node): keys = [self.constructed_objects[key_node] for key_node, _ in node.value] keys = [tuple(key) if isinstance(key, list) else key for key in keys] counter = Counter(keys) duplicate_keys = [key for key in counter if counter[key] > 1] if duplicate_keys: raise TypeError(f"Got duplicate yaml keys: {duplicate_keys}") def construct_mapping(self, node, deep=False): mapping = super().construct_mapping(node, deep=deep) self._check_no_duplicates_on_constructed_node(node) return mapping def _split_yaml_from_readme(readme_content: str) -> Tuple[Optional[str], str]: full_content = list(readme_content.splitlines()) if full_content and full_content[0] == "---" and "---" in full_content[1:]: sep_idx = full_content[1:].index("---") + 1 yamlblock = "\n".join(full_content[1:sep_idx]) return yamlblock, "\n".join(full_content[sep_idx + 1 :]) return None, "\n".join(full_content) class MetadataConfigs(Dict[str, Dict[str, Any]]): """Should be in format {config_name: {**config_params}}.""" FIELD_NAME: ClassVar[str] = METADATA_CONFIGS_FIELD @staticmethod def _raise_if_data_files_field_not_valid(metadata_config: dict): yaml_data_files = metadata_config.get("data_files") if yaml_data_files is not None: yaml_error_message = textwrap.dedent( f""" Expected data_files in YAML to be either a string or a list of strings or a list of dicts with two keys: 'split' and 'path', but got {yaml_data_files} Examples of data_files in YAML: data_files: data.csv data_files: data/*.png data_files: - part0/* - part1/* data_files: - split: train path: train/* - split: test path: test/* data_files: - split: train path: - train/part1/* - train/part2/* - split: test path: test/* PS: some symbols like dashes '-' are not allowed in split names """ ) if not isinstance(yaml_data_files, (list, str)): raise ValueError(yaml_error_message) if isinstance(yaml_data_files, list): for yaml_data_files_item in yaml_data_files: if ( not isinstance(yaml_data_files_item, (str, dict)) or isinstance(yaml_data_files_item, dict) and not ( len(yaml_data_files_item) == 2 and "split" in yaml_data_files_item and re.match(_split_re, yaml_data_files_item["split"]) and isinstance(yaml_data_files_item.get("path"), (str, list)) ) ): raise ValueError(yaml_error_message) @classmethod def _from_exported_parquet_files_and_dataset_infos( cls, revision: str, exported_parquet_files: List[Dict[str, Any]], dataset_infos: DatasetInfosDict, ) -> "MetadataConfigs": metadata_configs = { config_name: { "data_files": [ { "split": split_name, "path": [ parquet_file["url"].replace("refs%2Fconvert%2Fparquet", revision) for parquet_file in parquet_files_for_split ], } for split_name, parquet_files_for_split in groupby(parquet_files_for_config, itemgetter("split")) ], "version": str(dataset_infos.get(config_name, DatasetInfo()).version or "0.0.0"), } for config_name, parquet_files_for_config in groupby(exported_parquet_files, itemgetter("config")) } if dataset_infos: # Preserve order of configs and splits metadata_configs = { config_name: { "data_files": [ data_file 
for split_name in dataset_info.splits for data_file in metadata_configs[config_name]["data_files"] if data_file["split"] == split_name ], "version": metadata_configs[config_name]["version"], } for config_name, dataset_info in dataset_infos.items() } return cls(metadata_configs) @classmethod def from_dataset_card_data(cls, dataset_card_data: DatasetCardData) -> "MetadataConfigs": if dataset_card_data.get(cls.FIELD_NAME): metadata_configs = dataset_card_data[cls.FIELD_NAME] if not isinstance(metadata_configs, list): raise ValueError(f"Expected {cls.FIELD_NAME} to be a list, but got '{metadata_configs}'") for metadata_config in metadata_configs: if "config_name" not in metadata_config: raise ValueError( f"Each config must include `config_name` field with a string name of a config, " f"but got {metadata_config}. " ) cls._raise_if_data_files_field_not_valid(metadata_config) return cls( { config["config_name"]: {param: value for param, value in config.items() if param != "config_name"} for config in metadata_configs } ) return cls() def to_dataset_card_data(self, dataset_card_data: DatasetCardData) -> None: if self: for metadata_config in self.values(): self._raise_if_data_files_field_not_valid(metadata_config) current_metadata_configs = self.from_dataset_card_data(dataset_card_data) total_metadata_configs = dict(sorted({**current_metadata_configs, **self}.items())) for config_name, config_metadata in total_metadata_configs.items(): config_metadata.pop("config_name", None) dataset_card_data[self.FIELD_NAME] = [ {"config_name": config_name, **config_metadata} for config_name, config_metadata in total_metadata_configs.items() ] def get_default_config_name(self) -> Optional[str]: default_config_name = None for config_name, metadata_config in self.items(): if len(self) == 1 or config_name == "default" or metadata_config.get("default"): if default_config_name is None: default_config_name = config_name else: raise ValueError( f"Dataset has several default configs: '{default_config_name}' and '{config_name}'." ) return default_config_name # DEPRECATED - just here to support old versions of evaluate like 0.2.2 # To support new tasks on the Hugging Face Hub, please open a PR for this file: # https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/src/pipelines.ts known_task_ids = { "image-classification": [], "translation": [], "image-segmentation": [], "fill-mask": [], "automatic-speech-recognition": [], "token-classification": [], "sentence-similarity": [], "audio-classification": [], "question-answering": [], "summarization": [], "zero-shot-classification": [], "table-to-text": [], "feature-extraction": [], "other": [], "multiple-choice": [], "text-classification": [], "text-to-image": [], "text2text-generation": [], "zero-shot-image-classification": [], "tabular-classification": [], "tabular-regression": [], "image-to-image": [], "tabular-to-text": [], "unconditional-image-generation": [], "text-retrieval": [], "text-to-speech": [], "object-detection": [], "audio-to-audio": [], "text-generation": [], "conversational": [], "table-question-answering": [], "visual-question-answering": [], "image-to-text": [], "reinforcement-learning": [], "voice-activity-detection": [], "time-series-forecasting": [], "document-question-answering": [], }
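For illustration (the YAML content below is made up, and assumes the `configs` metadata field name used by recent releases), configurations can be parsed from a dataset card's metadata like this:

```python
from huggingface_hub import DatasetCardData

from datasets.utils.metadata import MetadataConfigs

card_data = DatasetCardData(
    configs=[
        {"config_name": "en", "data_files": [{"split": "train", "path": "en/train/*"}]},
        {"config_name": "fr", "data_files": [{"split": "train", "path": "fr/train/*"}]},
    ]
)
metadata_configs = MetadataConfigs.from_dataset_card_data(card_data)
print(list(metadata_configs))                      # ['en', 'fr']
print(metadata_configs.get_default_config_name())  # None (no config is marked as default)
```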
datasets/src/datasets/utils/metadata.py/0
{ "file_path": "datasets/src/datasets/utils/metadata.py", "repo_id": "datasets", "token_count": 4517 }
88
--- TODO: "Add YAML tags here. Delete these instructions and copy-paste the YAML tags obtained with the online tagging app: https://huggingface.co/spaces/huggingface/datasets-tagging" --- # Dataset Card for [Dataset Name] ## Table of Contents - [Table of Contents](#table-of-contents) - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Homepage:** - **Repository:** - **Paper:** - **Leaderboard:** - **Point of Contact:** ### Dataset Summary [More Information Needed] ### Supported Tasks and Leaderboards [More Information Needed] ### Languages [More Information Needed] ## Dataset Structure ### Data Instances [More Information Needed] ### Data Fields [More Information Needed] ### Data Splits [More Information Needed] ## Dataset Creation ### Curation Rationale [More Information Needed] ### Source Data #### Initial Data Collection and Normalization [More Information Needed] #### Who are the source language producers? [More Information Needed] ### Annotations #### Annotation process [More Information Needed] #### Who are the annotators? [More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information [More Information Needed] ### Citation Information [More Information Needed] ### Contributions Thanks to [@github-username](https://github.com/<github-username>) for adding this dataset.
datasets/templates/README.md/0
{ "file_path": "datasets/templates/README.md", "repo_id": "datasets", "token_count": 819 }
89
import io import json import fsspec import pytest from datasets import Dataset, DatasetDict, Features, NamedSplit, Value from datasets.io.json import JsonDatasetReader, JsonDatasetWriter from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def _check_json_dataset(dataset, expected_features): assert isinstance(dataset, Dataset) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("keep_in_memory", [False, True]) def test_dataset_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path): cache_dir = tmp_path / "cache" expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read() _check_json_dataset(dataset, expected_features) @pytest.mark.parametrize( "features", [ None, {"col_1": "string", "col_2": "int64", "col_3": "float64"}, {"col_1": "string", "col_2": "string", "col_3": "string"}, {"col_1": "int32", "col_2": "int32", "col_3": "int32"}, {"col_1": "float32", "col_2": "float32", "col_3": "float32"}, ], ) def test_dataset_from_json_features(features, jsonl_path, tmp_path): cache_dir = tmp_path / "cache" default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"} expected_features = features.copy() if features else default_expected_features features = ( Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None ) dataset = JsonDatasetReader(jsonl_path, features=features, cache_dir=cache_dir).read() _check_json_dataset(dataset, expected_features) @pytest.mark.parametrize( "features", [ None, {"col_3": "float64", "col_1": "string", "col_2": "int64"}, ], ) def test_dataset_from_json_with_unsorted_column_names(features, jsonl_312_path, tmp_path): cache_dir = tmp_path / "cache" default_expected_features = {"col_3": "float64", "col_1": "string", "col_2": "int64"} expected_features = features.copy() if features else default_expected_features features = ( Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None ) dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read() assert isinstance(dataset, Dataset) assert dataset.num_rows == 2 assert dataset.num_columns == 3 assert dataset.column_names == ["col_3", "col_1", "col_2"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype def test_dataset_from_json_with_mismatched_features(jsonl_312_path, tmp_path): # jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"} features = {"col_2": "int64", "col_3": "float64", "col_1": "string"} expected_features = features.copy() features = ( Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None ) cache_dir = tmp_path / "cache" dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read() assert isinstance(dataset, Dataset) assert dataset.num_rows == 2 assert dataset.num_columns == 3 assert dataset.column_names == ["col_2", "col_3", "col_1"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == 
expected_dtype @pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"]) def test_dataset_from_json_split(split, jsonl_path, tmp_path): cache_dir = tmp_path / "cache" expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"} dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, split=split).read() _check_json_dataset(dataset, expected_features) assert dataset.split == split if split else "train" @pytest.mark.parametrize("path_type", [str, list]) def test_dataset_from_json_path_type(path_type, jsonl_path, tmp_path): if issubclass(path_type, str): path = jsonl_path elif issubclass(path_type, list): path = [jsonl_path] cache_dir = tmp_path / "cache" expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"} dataset = JsonDatasetReader(path, cache_dir=cache_dir).read() _check_json_dataset(dataset, expected_features) def _check_json_datasetdict(dataset_dict, expected_features, splits=("train",)): assert isinstance(dataset_dict, DatasetDict) for split in splits: dataset = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("keep_in_memory", [False, True]) def test_datasetdict_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path): cache_dir = tmp_path / "cache" expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): dataset = JsonDatasetReader({"train": jsonl_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read() _check_json_datasetdict(dataset, expected_features) @pytest.mark.parametrize( "features", [ None, {"col_1": "string", "col_2": "int64", "col_3": "float64"}, {"col_1": "string", "col_2": "string", "col_3": "string"}, {"col_1": "int32", "col_2": "int32", "col_3": "int32"}, {"col_1": "float32", "col_2": "float32", "col_3": "float32"}, ], ) def test_datasetdict_from_json_features(features, jsonl_path, tmp_path): cache_dir = tmp_path / "cache" default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"} expected_features = features.copy() if features else default_expected_features features = ( Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None ) dataset = JsonDatasetReader({"train": jsonl_path}, features=features, cache_dir=cache_dir).read() _check_json_datasetdict(dataset, expected_features) @pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"]) def test_datasetdict_from_json_splits(split, jsonl_path, tmp_path): if split: path = {split: jsonl_path} else: split = "train" path = {"train": jsonl_path, "test": jsonl_path} cache_dir = tmp_path / "cache" expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"} dataset = JsonDatasetReader(path, cache_dir=cache_dir).read() _check_json_datasetdict(dataset, expected_features, splits=list(path.keys())) assert all(dataset[split].split == split for split in path.keys()) def load_json(buffer): return json.load(buffer) def load_json_lines(buffer): return [json.loads(line) for line in buffer] class TestJsonDatasetWriter: @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)]) def test_dataset_to_json_lines(self, lines, load_json_function, dataset): with 
io.BytesIO() as buffer: JsonDatasetWriter(dataset, buffer, lines=lines).write() buffer.seek(0) exported_content = load_json_function(buffer) assert isinstance(exported_content, list) assert isinstance(exported_content[0], dict) assert len(exported_content) == 10 @pytest.mark.parametrize( "orient, container, keys, len_at", [ ("records", list, {"tokens", "labels", "answers", "id"}, None), ("split", dict, {"columns", "data"}, "data"), ("index", dict, set("0123456789"), None), ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"), ("values", list, None, None), ("table", dict, {"schema", "data"}, "data"), ], ) def test_dataset_to_json_orient(self, orient, container, keys, len_at, dataset): with io.BytesIO() as buffer: JsonDatasetWriter(dataset, buffer, lines=False, orient=orient).write() buffer.seek(0) exported_content = load_json(buffer) assert isinstance(exported_content, container) if keys: if container is dict: assert exported_content.keys() == keys else: assert exported_content[0].keys() == keys else: assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys") if len_at: assert len(exported_content[len_at]) == 10 else: assert len(exported_content) == 10 @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)]) def test_dataset_to_json_lines_multiproc(self, lines, load_json_function, dataset): with io.BytesIO() as buffer: JsonDatasetWriter(dataset, buffer, lines=lines, num_proc=2).write() buffer.seek(0) exported_content = load_json_function(buffer) assert isinstance(exported_content, list) assert isinstance(exported_content[0], dict) assert len(exported_content) == 10 @pytest.mark.parametrize( "orient, container, keys, len_at", [ ("records", list, {"tokens", "labels", "answers", "id"}, None), ("split", dict, {"columns", "data"}, "data"), ("index", dict, set("0123456789"), None), ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"), ("values", list, None, None), ("table", dict, {"schema", "data"}, "data"), ], ) def test_dataset_to_json_orient_multiproc(self, orient, container, keys, len_at, dataset): with io.BytesIO() as buffer: JsonDatasetWriter(dataset, buffer, lines=False, orient=orient, num_proc=2).write() buffer.seek(0) exported_content = load_json(buffer) assert isinstance(exported_content, container) if keys: if container is dict: assert exported_content.keys() == keys else: assert exported_content[0].keys() == keys else: assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys") if len_at: assert len(exported_content[len_at]) == 10 else: assert len(exported_content) == 10 def test_dataset_to_json_orient_invalidproc(self, dataset): with pytest.raises(ValueError): with io.BytesIO() as buffer: JsonDatasetWriter(dataset, buffer, num_proc=0) @pytest.mark.parametrize("compression, extension", [("gzip", "gz"), ("bz2", "bz2"), ("xz", "xz")]) def test_dataset_to_json_compression(self, shared_datadir, tmp_path_factory, extension, compression, dataset): path = tmp_path_factory.mktemp("data") / f"test.json.{extension}" original_path = str(shared_datadir / f"test_file.json.{extension}") JsonDatasetWriter(dataset, path, compression=compression).write() with fsspec.open(path, "rb", compression="infer") as f: exported_content = f.read() with fsspec.open(original_path, "rb", compression="infer") as f: original_content = f.read() assert exported_content == original_content def test_dataset_to_json_fsspec(self, dataset, mockfs): dataset_path = "mock://my_dataset.json" 
writer = JsonDatasetWriter(dataset, dataset_path, storage_options=mockfs.storage_options) assert writer.write() > 0 assert mockfs.isfile(dataset_path) with fsspec.open(dataset_path, "rb", **mockfs.storage_options) as f: assert f.read()
datasets/tests/io/test_json.py/0
{ "file_path": "datasets/tests/io/test_json.py", "repo_id": "datasets", "token_count": 5153 }
90
import textwrap import pyarrow as pa import pytest from datasets import Features, Image from datasets.builder import InvalidConfigName from datasets.data_files import DataFilesList from datasets.packaged_modules.text.text import Text, TextConfig from ..utils import require_pil @pytest.fixture def text_file(tmp_path): filename = tmp_path / "text.txt" data = textwrap.dedent( """\ Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum. Second paragraph: Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum. """ ) with open(filename, "w", encoding="utf-8") as f: f.write(data) return str(filename) @pytest.fixture def text_file_with_image(tmp_path, image_file): filename = tmp_path / "text_with_image.txt" with open(filename, "w", encoding="utf-8") as f: f.write(image_file) return str(filename) def test_config_raises_when_invalid_name() -> None: with pytest.raises(InvalidConfigName, match="Bad characters"): _ = TextConfig(name="name-with-*-invalid-character") @pytest.mark.parametrize("data_files", ["str_path", ["str_path"], DataFilesList(["str_path"], [()])]) def test_config_raises_when_invalid_data_files(data_files) -> None: with pytest.raises(ValueError, match="Expected a DataFilesDict"): _ = TextConfig(name="name", data_files=data_files) @pytest.mark.parametrize("keep_linebreaks", [True, False]) def test_text_linebreaks(text_file, keep_linebreaks): with open(text_file, encoding="utf-8") as f: expected_content = f.read().splitlines(keepends=keep_linebreaks) text = Text(keep_linebreaks=keep_linebreaks, encoding="utf-8") generator = text._generate_tables([[text_file]]) generated_content = pa.concat_tables([table for _, table in generator]).to_pydict()["text"] assert generated_content == expected_content @require_pil def test_text_cast_image(text_file_with_image): with open(text_file_with_image, encoding="utf-8") as f: image_file = f.read().splitlines()[0] text = Text(encoding="utf-8", features=Features({"image": Image()})) generator = text._generate_tables([[text_file_with_image]]) pa_table = pa.concat_tables([table for _, table in generator]) assert pa_table.schema.field("image").type == Image()() generated_content = pa_table.to_pydict()["image"] assert generated_content == [{"path": image_file, "bytes": None}] @pytest.mark.parametrize("sample_by", ["line", "paragraph", "document"]) def test_text_sample_by(sample_by, text_file): with open(text_file, encoding="utf-8") as f: expected_content = f.read() if sample_by == "line": expected_content = expected_content.splitlines() elif sample_by == "paragraph": expected_content = expected_content.split("\n\n") elif sample_by == "document": expected_content = [expected_content] text = Text(sample_by=sample_by, encoding="utf-8", chunksize=100) generator = text._generate_tables([[text_file]]) 
generated_content = pa.concat_tables([table for _, table in generator]).to_pydict()["text"] assert generated_content == expected_content
datasets/tests/packaged_modules/test_text.py/0
{ "file_path": "datasets/tests/packaged_modules/test_text.py", "repo_id": "datasets", "token_count": 1494 }
91
import importlib import os import fsspec import pytest from fsspec import register_implementation from fsspec.core import url_to_fs from fsspec.registry import _registry as _fsspec_registry from datasets.filesystems import COMPRESSION_FILESYSTEMS, is_remote_filesystem from .utils import require_lz4, require_zstandard def test_mockfs(mockfs): assert "mock" in _fsspec_registry assert "bz2" in _fsspec_registry def test_non_mockfs(): assert "mock" not in _fsspec_registry assert "bz2" in _fsspec_registry def test_is_remote_filesystem(mockfs): is_remote = is_remote_filesystem(mockfs) assert is_remote is True fs = fsspec.filesystem("file") is_remote = is_remote_filesystem(fs) assert is_remote is False @pytest.mark.parametrize("compression_fs_class", COMPRESSION_FILESYSTEMS) def test_compression_filesystems(compression_fs_class, gz_file, bz2_file, lz4_file, zstd_file, xz_file, text_file): input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bz2_file, "lz4": lz4_file} input_path = input_paths[compression_fs_class.protocol] if input_path is None: reason = f"for '{compression_fs_class.protocol}' compression protocol, " if compression_fs_class.protocol == "lz4": reason += require_lz4.kwargs["reason"] elif compression_fs_class.protocol == "zstd": reason += require_zstandard.kwargs["reason"] pytest.skip(reason) fs = fsspec.filesystem(compression_fs_class.protocol, fo=input_path) assert isinstance(fs, compression_fs_class) expected_filename = os.path.basename(input_path) expected_filename = expected_filename[: expected_filename.rindex(".")] assert fs.glob("*") == [expected_filename] with fs.open(expected_filename, "r", encoding="utf-8") as f, open(text_file, encoding="utf-8") as expected_file: assert f.read() == expected_file.read() @pytest.mark.parametrize("protocol", ["zip", "gzip"]) def test_fs_isfile(protocol, zip_jsonl_path, jsonl_gz_path): compressed_file_paths = {"zip": zip_jsonl_path, "gzip": jsonl_gz_path} compressed_file_path = compressed_file_paths[protocol] member_file_path = "dataset.jsonl" path = f"{protocol}://{member_file_path}::{compressed_file_path}" fs, *_ = url_to_fs(path) assert fs.isfile(member_file_path) assert not fs.isfile("non_existing_" + member_file_path) def test_fs_overwrites(): protocol = "bz2" # Import module import datasets.filesystems # Overwrite protocol and reload register_implementation(protocol, None, clobber=True) with pytest.warns(UserWarning) as warning_info: importlib.reload(datasets.filesystems) assert len(warning_info) == 1 assert ( str(warning_info[0].message) == f"A filesystem protocol was already set for {protocol} and will be overwritten." )
datasets/tests/test_filesystem.py/0
{ "file_path": "datasets/tests/test_filesystem.py", "repo_id": "datasets", "token_count": 1145 }
92
import inspect import pytest from datasets.splits import Split, SplitDict, SplitInfo from datasets.utils.py_utils import asdict @pytest.mark.parametrize( "split_dict", [ SplitDict(), SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42, dataset_name="my_dataset")}), SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42)}), SplitDict({"train": SplitInfo()}), ], ) def test_split_dict_to_yaml_list(split_dict: SplitDict): split_dict_yaml_list = split_dict._to_yaml_list() assert len(split_dict_yaml_list) == len(split_dict) reloaded = SplitDict._from_yaml_list(split_dict_yaml_list) for split_name, split_info in split_dict.items(): # dataset_name field is deprecated, and is therefore not part of the YAML dump split_info.dataset_name = None # the split name of split_dict takes over the name of the split info object split_info.name = split_name assert split_dict == reloaded @pytest.mark.parametrize( "split_info", [SplitInfo(), SplitInfo(dataset_name=None), SplitInfo(dataset_name="my_dataset")] ) def test_split_dict_asdict_has_dataset_name(split_info): # For backward compatibility, we need asdict(split_dict) to return split info dictionaries with the "dataset_name" # field even if it's deprecated. This way old versions of `datasets` can still reload dataset_infos.json files split_dict_asdict = asdict(SplitDict({"train": split_info})) assert "dataset_name" in split_dict_asdict["train"] assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name def test_named_split_inequality(): # Used while building the docs, when set as a default parameter value in a function signature assert Split.TRAIN != inspect.Parameter.empty
datasets/tests/test_splits.py/0
{ "file_path": "datasets/tests/test_splits.py", "repo_id": "datasets", "token_count": 678 }
93
<jupyter_start><jupyter_text>Unit 3: Deep Q-Learning with Atari Games 👾 using RL Baselines3 ZooIn this notebook, **you'll train a Deep Q-Learning agent** playing Space Invaders using [RL Baselines3 Zoo](https://github.com/DLR-RM/rl-baselines3-zoo), a training framework based on [Stable-Baselines3](https://stable-baselines3.readthedocs.io/en/master/) that provides scripts for training, evaluating agents, tuning hyperparameters, plotting results and recording videos.We're using the [RL-Baselines-3 Zoo integration, a vanilla version of Deep Q-Learning](https://stable-baselines3.readthedocs.io/en/master/modules/dqn.html) with no extensions such as Double-DQN, Dueling-DQN, and Prioritized Experience Replay.⬇️ Here is an example of what **you will achieve** ⬇️<jupyter_code>%%html <video controls autoplay><source src="https://huggingface.co/ThomasSimonini/ppo-SpaceInvadersNoFrameskip-v4/resolve/main/replay.mp4" type="video/mp4"></video><jupyter_output><empty_output><jupyter_text>🎮 Environments:- [SpaceInvadersNoFrameskip-v4](https://gymnasium.farama.org/environments/atari/space_invaders/)You can see the difference between Space Invaders versions here 👉 https://gymnasium.farama.org/environments/atari/space_invaders/variants 📚 RL-Library:- [RL-Baselines3-Zoo](https://github.com/DLR-RM/rl-baselines3-zoo) Objectives of this notebook 🏆At the end of the notebook, you will:- Be able to understand deeper **how RL Baselines3 Zoo works**.- Be able to **push your trained agent and the code to the Hub** with a nice video replay and an evaluation score 🔥. This notebook is from the Deep Reinforcement Learning Course In this free course, you will:- 📖 Study Deep Reinforcement Learning in **theory and practice**.- 🧑‍💻 Learn to **use famous Deep RL libraries** such as Stable Baselines3, RL Baselines3 Zoo, CleanRL and Sample Factory 2.0.- 🤖 Train **agents in unique environments**And more! Check 📚 the syllabus 👉 https://simoninithomas.github.io/deep-rl-course Don’t forget to **sign up to the course** (we are collecting your email to be able to **send you the links when each Unit is published and give you information about the challenges and updates**).The best way to keep in touch is to join our discord server to exchange with the community and with us 👉🏻 https://discord.gg/ydHrjt3WP5 Prerequisites 🏗️Before diving into the notebook, you need to:🔲 📚 **[Study Deep Q-Learning by reading Unit 3](https://huggingface.co/deep-rl-course/unit3/introduction)** 🤗 We're constantly trying to improve our tutorials, so **if you find some issues in this notebook**, please [open an issue on the GitHub Repo](https://github.com/huggingface/deep-rl-class/issues). 
Let's train a Deep Q-Learning agent playing Atari's Space Invaders 👾 and upload it to the Hub.We strongly recommend students **to use Google Colab for the hands-on exercises instead of running them on their personal computers**.By using Google Colab, **you can focus on learning and experimenting without worrying about the technical aspects of setting up your environments**.To validate this hands-on for the certification process, you need to push your trained model to the Hub and **get a result of >= 200**.To find your result, go to the leaderboard and find your model; **the result = mean_reward - std of reward**. For more information about the certification process, check this section 👉 https://huggingface.co/deep-rl-course/en/unit0/introduction#certification-process A word of advice 💡It's better to run this Colab in a copy on your Google Drive, so that **if it times out** you still have the saved notebook on your Google Drive and do not need to fill everything in from scratch.To do that you can either do `Ctrl + S` or `File > Save a copy in Google Drive.`Also, we're going to **train it for 90 minutes with 1M timesteps**. Typing `!nvidia-smi` will tell you what GPU you're using.And if you want to train more, such as for 10 million steps, this will take about 9 hours, potentially resulting in Colab timing out. In that case, I recommend running this on your local computer (or somewhere else). Just click on: `File>Download`. Set the GPU 💪- To **accelerate the agent's training, we'll use a GPU**. To do that, go to `Runtime > Change Runtime type` - `Hardware Accelerator > GPU` Install RL-Baselines3 Zoo and its dependencies 📚If you see `ERROR: pip's dependency resolver does not currently take into account all the packages that are installed.` **this is normal and it's not a critical error**; there's a version conflict. But the packages we need are installed.<jupyter_code>!pip install git+https://github.com/DLR-RM/rl-baselines3-zoo !apt-get install swig cmake ffmpeg<jupyter_output><empty_output><jupyter_text>To be able to use Atari games in Gymnasium we need to install the atari package, and accept-rom-license to download the ROM files (game files).<jupyter_code>!pip install gymnasium[atari] !pip install gymnasium[accept-rom-license]<jupyter_output><empty_output><jupyter_text>Create a virtual display 🔽During the notebook, we'll need to generate a replay video. To do so, with Colab, **we need to have a virtual screen to be able to render the environment** (and thus record the frames).Hence the following cell will install the libraries and create and run a virtual screen 🖥<jupyter_code>%%capture !apt install python-opengl !apt install xvfb !pip3 install pyvirtualdisplay # Virtual display from pyvirtualdisplay import Display virtual_display = Display(visible=0, size=(1400, 900)) virtual_display.start()<jupyter_output><empty_output><jupyter_text>Train our Deep Q-Learning Agent to Play Space Invaders 👾To train an agent with RL-Baselines3-Zoo, we just need to do two things: 1. 
Create a hyperparameter config file that will contain our training hyperparameters, called `dqn.yml`.This is a template example:```SpaceInvadersNoFrameskip-v4: env_wrapper: - stable_baselines3.common.atari_wrappers.AtariWrapper frame_stack: 4 policy: 'CnnPolicy' n_timesteps: !!float 1e7 buffer_size: 100000 learning_rate: !!float 1e-4 batch_size: 32 learning_starts: 100000 target_update_interval: 1000 train_freq: 4 gradient_steps: 1 exploration_fraction: 0.1 exploration_final_eps: 0.01 # If True, you need to deactivate handle_timeout_termination in the replay_buffer_kwargs optimize_memory_usage: False``` Here we see that:- We use the `Atari Wrapper` that preprocesses the input (frame reduction, grayscale, stacking 4 frames)- We use `CnnPolicy`, since we use convolutional layers to process the frames- We train it for 10 million `n_timesteps`- Memory (Experience Replay) size is 100000, i.e., the amount of experience steps saved so that your agent can train on them again.💡 My advice is to **reduce the training timesteps to 1M,** which will take about 90 minutes on a P100. `!nvidia-smi` will tell you what GPU you're using. At 10 million steps, this will take about 9 hours, which could likely result in Colab timing out. I recommend running this on your local computer (or somewhere else). Just click on: `File>Download`. In terms of hyperparameter optimization, my advice is to focus on these 3 hyperparameters:- `learning_rate`- `buffer_size (Experience Memory size)`- `batch_size`As a good practice, you need to **check the documentation to understand what each hyperparameter does**: https://stable-baselines3.readthedocs.io/en/master/modules/dqn.html#parameters 2. We start the training and save the models in the `logs` folder 📁- Define the algorithm after `--algo`, where we save the model after `-f` and where the hyperparameter config is after `-c`.<jupyter_code>!python -m rl_zoo3.train --algo ________ --env SpaceInvadersNoFrameskip-v4 -f _________ -c _________<jupyter_output><empty_output><jupyter_text>Solution<jupyter_code>!python -m rl_zoo3.train --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ -c dqn.yml<jupyter_output><empty_output><jupyter_text>Let's evaluate our agent 👀- RL-Baselines3-Zoo provides `enjoy.py`, a Python script to evaluate our agent. In most RL libraries, we call the evaluation script `enjoy.py`.- Let's evaluate it for 5000 timesteps 🔥<jupyter_code>!python -m rl_zoo3.enjoy --algo dqn --env SpaceInvadersNoFrameskip-v4 --no-render --n-timesteps _________ --folder logs/<jupyter_output><empty_output><jupyter_text>Solution<jupyter_code>!python -m rl_zoo3.enjoy --algo dqn --env SpaceInvadersNoFrameskip-v4 --no-render --n-timesteps 5000 --folder logs/<jupyter_output><empty_output><jupyter_text>Publish our trained model on the Hub 🚀Now that we saw we got good results after the training, we can publish our trained model on the Hub 🤗 with one line of code. 
By using `rl_zoo3.push_to_hub` **you evaluate, record a replay, generate a model card of your agent and push it to the hub**.This way:- You can **showcase your work** 🔥- You can **visualize your agent playing** 👀- You can **share with the community an agent that others can use** 💾- You can **access a leaderboard 🏆 to see how well your agent is performing compared to your classmates** 👉 https://huggingface.co/spaces/huggingface-projects/Deep-Reinforcement-Learning-Leaderboard To be able to share your model with the community there are three more steps to follow:1️⃣ (If it's not already done) create an account on HF ➡ https://huggingface.co/join 2️⃣ Sign in, and then you need to store your authentication token from the Hugging Face website.- Create a new token (https://huggingface.co/settings/tokens) **with write role** - Copy the token- Run the cell below and paste the token<jupyter_code>from huggingface_hub import notebook_login # To log in to our Hugging Face account to be able to upload models to the Hub. notebook_login() !git config --global credential.helper store<jupyter_output><empty_output><jupyter_text>If you don't want to use a Google Colab or a Jupyter Notebook, you need to use this command instead: `huggingface-cli login` 3️⃣ We're now ready to push our trained agent to the 🤗 Hub 🔥 Let's run the push_to_hub.py file to upload our trained agent to the Hub.`--repo-name`: the name of the repo. `-orga`: your Hugging Face username. `-f`: where the trained model folder is (in our case `logs`).<jupyter_code>!python -m rl_zoo3.push_to_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 --repo-name _____________________ -orga _____________________ -f logs/<jupyter_output><empty_output><jupyter_text>Solution<jupyter_code>!python -m rl_zoo3.push_to_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 --repo-name dqn-SpaceInvadersNoFrameskip-v4 -orga ThomasSimonini -f logs/<jupyter_output><empty_output><jupyter_text>Congrats 🥳 you've just trained and uploaded your first Deep Q-Learning agent using RL-Baselines-3 Zoo. The script above should have displayed a link to a model repository such as https://huggingface.co/ThomasSimonini/dqn-SpaceInvadersNoFrameskip-v4. 
When you go to this link, you can:- See a **video preview of your agent** at the right.- Click "Files and versions" to see all the files in the repository.- Click "Use in stable-baselines3" to get a code snippet that shows how to load the model.- See a model card (`README.md` file), which gives a description of the model and the hyperparameters you used.Under the hood, the Hub uses git-based repositories (don't worry if you don't know what git is), which means you can update the model with new versions as you experiment and improve your agent.**Compare the results of your agents with your classmates** using the [leaderboard](https://huggingface.co/spaces/huggingface-projects/Deep-Reinforcement-Learning-Leaderboard) 🏆 Load a powerful trained model 🔥- The Stable-Baselines3 team uploaded **more than 150 trained Deep Reinforcement Learning agents on the Hub**.You can find them here: 👉 https://huggingface.co/sb3 Some examples:- Asteroids: https://huggingface.co/sb3/dqn-AsteroidsNoFrameskip-v4- Beam Rider: https://huggingface.co/sb3/dqn-BeamRiderNoFrameskip-v4- Breakout: https://huggingface.co/sb3/dqn-BreakoutNoFrameskip-v4- Road Runner: https://huggingface.co/sb3/dqn-RoadRunnerNoFrameskip-v4 Let's load an agent playing Beam Rider: https://huggingface.co/sb3/dqn-BeamRiderNoFrameskip-v4<jupyter_code>%%html <video controls autoplay><source src="https://huggingface.co/sb3/dqn-BeamRiderNoFrameskip-v4/resolve/main/replay.mp4" type="video/mp4"></video><jupyter_output><empty_output><jupyter_text>1. We download the model using `rl_zoo3.load_from_hub`, and place it in a new folder that we can call `rl_trained`<jupyter_code># Download model and save it into the rl_trained/ folder !python -m rl_zoo3.load_from_hub --algo dqn --env BeamRiderNoFrameskip-v4 -orga sb3 -f rl_trained/<jupyter_output><empty_output><jupyter_text>2. Let's evaluate it for 5000 timesteps<jupyter_code>!python -m rl_zoo3.enjoy --algo dqn --env BeamRiderNoFrameskip-v4 -n 5000 -f rl_trained/ --no-render<jupyter_output><empty_output>
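As a complement to the zoo CLI scripts used above, here is a minimal sketch of loading and evaluating the trained agent directly with Stable-Baselines3 in Python. The model path is an assumption — it depends on your run id and folder layout under `logs/`.

```python
from stable_baselines3 import DQN
from stable_baselines3.common.env_util import make_atari_env
from stable_baselines3.common.vec_env import VecFrameStack
from stable_baselines3.common.evaluation import evaluate_policy

# Recreate the evaluation environment the same way the zoo does: Atari wrappers + 4 stacked frames.
env = make_atari_env("SpaceInvadersNoFrameskip-v4", n_envs=1, seed=42)
env = VecFrameStack(env, n_stack=4)

# The exact path depends on your run; this one is a hypothetical example.
model = DQN.load("logs/dqn/SpaceInvadersNoFrameskip-v4_1/best_model.zip", env=env)

# Mean and std of the episodic return over 10 evaluation episodes.
mean_reward, std_reward = evaluate_policy(model, env, n_eval_episodes=10, deterministic=True)
print(f"mean_reward={mean_reward:.2f} +/- {std_reward:.2f}")
```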
deep-rl-class/notebooks/unit3/unit3.ipynb/0
{ "file_path": "deep-rl-class/notebooks/unit3/unit3.ipynb", "repo_id": "deep-rl-class", "token_count": 3885 }
94
# Conclusion [[conclusion]] Congrats on finishing this unit! **That was the biggest one**, and there was a lot of information. And congrats on finishing the tutorial. You’ve just trained your first Deep RL agents and shared them with the community! 🥳 It's **normal if you still feel confused by some of these elements**. This was the same for me and for everyone who has studied RL. **Take time to really grasp the material** before continuing. It’s important to master these elements and have a solid foundation before entering the fun part. Naturally, during the course, we’re going to use and explain these terms again, but it’s better to understand them before diving into the next units. In the next (bonus) unit, we’re going to reinforce what we just learned by **training Huggy the Dog to fetch a stick**. You will then be able to play with him 🤗. <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/notebooks/unit-bonus1/huggy.jpg" alt="Huggy"/> Finally, we would love **to hear what you think of the course and how we can improve it**. If you have some feedback, please 👉 [fill out this form](https://forms.gle/BzKXWzLAGZESGNaE9) ### Keep Learning, stay awesome 🤗
deep-rl-class/units/en/unit1/conclusion.mdx/0
{ "file_path": "deep-rl-class/units/en/unit1/conclusion.mdx", "repo_id": "deep-rl-class", "token_count": 349 }
95
# Glossary This is a community-created glossary. Contributions are welcomed! - **Tabular Method:** Type of problem in which the state and action spaces are small enough for value functions to be represented as arrays and tables. **Q-learning** is an example of a tabular method, since a table is used to represent the value for different state-action pairs. - **Deep Q-Learning:** Method that trains a neural network to approximate, given a state, the different **Q-values** for each possible action at that state. It is used to solve problems where the observation space is too big to apply a tabular Q-Learning approach. - **Temporal Limitation** is a difficulty presented when the environment state is represented by frames. A frame by itself does not provide temporal information. In order to obtain temporal information, we need to **stack** a number of frames together. - **Phases of Deep Q-Learning:** - **Sampling:** Actions are performed, and observed experience tuples are stored in a **replay memory**. - **Training:** Batches of tuples are selected randomly and the neural network updates its weights using gradient descent. - **Solutions to stabilize Deep Q-Learning:** - **Experience Replay:** A replay memory is created to save experience samples that can be reused during training. This allows the agent to learn from the same experiences multiple times. Also, it helps the agent avoid forgetting previous experiences as it gets new ones. - **Random sampling** from the replay buffer removes correlation in the observation sequences and prevents action values from oscillating or diverging catastrophically. - **Fixed Q-Target:** In order to calculate the **Q-Target** we need to estimate the discounted optimal **Q-value** of the next state by using the Bellman equation. The problem is that the same network weights are used to calculate the **Q-Target** and the **Q-value**. This means that every time we modify the **Q-value**, the **Q-Target** also moves with it. To avoid this issue, a separate network with fixed parameters is used for estimating the Temporal Difference Target. The target network is updated by copying parameters from our Deep Q-Network every **C steps**. - **Double DQN:** Method to handle **overestimation** of **Q-Values**. This solution uses two networks to decouple the action selection from the target **Value generation**: - **DQN Network** to select the best action to take for the next state (the action with the highest **Q-Value**) - **Target Network** to calculate the target **Q-Value** of taking that action at the next state. This approach reduces **Q-Values** overestimation, helps train faster, and gives more stable learning. If you want to improve the course, you can [open a Pull Request.](https://github.com/huggingface/deep-rl-class/pulls) This glossary was made possible thanks to: - [Dario Paez](https://github.com/dario248)
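To make the Double DQN entry above concrete, here is a minimal PyTorch sketch of the target computation. The network interface (a module mapping a batch of states to per-action Q-values) and the `gamma` value are illustrative assumptions, not the course's reference implementation.

```python
import torch

def double_dqn_targets(online_net, target_net, rewards, next_states, dones, gamma=0.99):
    """Compute Double DQN targets for a batch of transitions.

    online_net / target_net: modules mapping a batch of states to per-action Q-values.
    rewards, dones: 1-D float tensors; next_states: batch of next states.
    """
    with torch.no_grad():
        # 1) The online (DQN) network selects the best action for the next state...
        next_actions = online_net(next_states).argmax(dim=1, keepdim=True)
        # 2) ...and the target network evaluates the Q-value of taking that action.
        next_q = target_net(next_states).gather(1, next_actions).squeeze(1)
        # Bellman target: no bootstrapping on terminal transitions.
        return rewards + gamma * (1.0 - dones) * next_q
```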
deep-rl-class/units/en/unit3/glossary.mdx/0
{ "file_path": "deep-rl-class/units/en/unit3/glossary.mdx", "repo_id": "deep-rl-class", "token_count": 721 }
96
# (Optional) What is Curiosity in Deep Reinforcement Learning? This is an (optional) introduction to Curiosity. If you want to learn more, you can read two additional articles where we dive into the mathematical details: - [Curiosity-Driven Learning through Next State Prediction](https://medium.com/data-from-the-trenches/curiosity-driven-learning-through-next-state-prediction-f7f4e2f592fa) - [Random Network Distillation: a new take on Curiosity-Driven Learning](https://medium.com/data-from-the-trenches/curiosity-driven-learning-through-random-network-distillation-488ffd8e5938) ## Two Major Problems in Modern RL To understand what Curiosity is, we first need to understand the two major problems with RL: First, the *sparse rewards problem:* that is, **most rewards do not contain information, and hence are set to zero**. Remember that RL is based on the *reward hypothesis*, which is the idea that each goal can be described as the maximization of the rewards. Therefore, rewards act as feedback for RL agents; **if they don’t receive any, their knowledge of which action is appropriate (or not) cannot change**. <figure> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit5/curiosity1.png" alt="Curiosity"/> <figcaption>Source: Thanks to the reward, our agent knows that this action at that state was good</figcaption> </figure> For instance, in “DoomMyWayHome,” one of the [Vizdoom](https://vizdoom.cs.put.edu.pl/) environments based on the game Doom, your agent is only rewarded **if it finds the vest**. However, the vest is far away from your starting point, so most of your rewards will be zero. Therefore, if our agent does not receive useful feedback (dense rewards), it will take much longer to learn an optimal policy, and **it can spend time turning around without finding the goal**. <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit5/curiosity2.png" alt="Curiosity"/> The second big problem is that **the extrinsic reward function is handmade; in each environment, a human has to implement a reward function**. But how can we scale that to big and complex environments? ## So what is Curiosity? A solution to these problems is **to develop a reward function intrinsic to the agent, i.e., generated by the agent itself**. The agent will act as a self-learner since it will be the student and its own feedback master. **This intrinsic reward mechanism is known as Curiosity** because this reward pushes the agent to explore states that are novel/unfamiliar. To achieve that, our agent will receive a high reward when exploring new trajectories. This reward is inspired by how humans act: **we naturally have an intrinsic desire to explore environments and discover new things**. There are different ways to calculate this intrinsic reward. The classical approach (Curiosity through next-state prediction) is to calculate Curiosity **as the error of our agent in predicting the next state, given the current state and action taken**. <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit5/curiosity3.png" alt="Curiosity"/> The idea of Curiosity is to **encourage our agent to perform actions that reduce the uncertainty in the agent’s ability to predict the consequences of its actions** (uncertainty will be higher in areas where the agent has spent less time or in areas with complex dynamics). 
If the agent spends a lot of time in these states, it will be good at predicting the next state (low Curiosity). On the other hand, if it’s in a new, unexplored state, it will be hard to predict the following state (high Curiosity). <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit5/curiosity4.png" alt="Curiosity"/> Using Curiosity will push our agent to favor transitions with high prediction error (which will be higher in areas where the agent has spent less time, or in areas with complex dynamics) and **consequently better explore our environment**. There are also **other curiosity calculation methods**. ML-Agents uses a more advanced one called Curiosity through random network distillation. This is out of the scope of this tutorial, but if you’re interested, [I wrote an article explaining it in detail](https://medium.com/data-from-the-trenches/curiosity-driven-learning-through-random-network-distillation-488ffd8e5938).
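Here is a minimal sketch of the classical "next-state prediction" curiosity described above. The network sizes and the reward scale are illustrative assumptions, and real implementations (e.g. ICM) predict in a learned feature space rather than on raw observations.

```python
import torch
import torch.nn as nn

class ForwardModel(nn.Module):
    """Predicts the next state embedding from the current embedding and the action taken."""

    def __init__(self, embed_dim: int, action_dim: int, hidden: int = 256):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(embed_dim + action_dim, hidden),
            nn.ReLU(),
            nn.Linear(hidden, embed_dim),
        )

    def forward(self, state_embed, action_onehot):
        return self.net(torch.cat([state_embed, action_onehot], dim=-1))

def intrinsic_reward(forward_model, state_embed, action_onehot, next_state_embed, scale=0.01):
    # Curiosity bonus = prediction error on the next state:
    # large in unfamiliar or hard-to-predict states, small in well-visited ones.
    with torch.no_grad():
        predicted = forward_model(state_embed, action_onehot)
        error = 0.5 * (predicted - next_state_embed).pow(2).sum(dim=-1)
    return scale * error
```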
deep-rl-class/units/en/unit5/curiosity.mdx/0
{ "file_path": "deep-rl-class/units/en/unit5/curiosity.mdx", "repo_id": "deep-rl-class", "token_count": 1153 }
97
# Hands-on Now that you've learned the basics of multi-agents, you're ready to train your first agents in a multi-agent system: **a 2vs2 soccer team that needs to beat the opponent team**. And you’re going to participate in AI vs. AI challenges where your trained agent will compete against other classmates’ **agents every day and be ranked on a new leaderboard.** To validate this hands-on for the certification process, you just need to push a trained model. There **are no minimal results to attain to validate it.** For more information about the certification process, check this section 👉 [https://huggingface.co/deep-rl-course/en/unit0/introduction#certification-process](https://huggingface.co/deep-rl-course/en/unit0/introduction#certification-process) This hands-on will be different since to get correct results **you need to train your agents from 4 hours to 8 hours**. And given the risk of timeout in Colab, we advise you to train on your computer. You don’t need a supercomputer: a simple laptop is good enough for this exercise. Let's get started! 🔥 ## What is AI vs. AI? AI vs. AI is an open-source tool we developed at Hugging Face to compete agents on the Hub against one another in a multi-agent setting. These models are then ranked in a leaderboard. The idea of this tool is to have a robust evaluation tool: **by evaluating your agent with a lot of others, you’ll get a good idea of the quality of your policy.** More precisely, AI vs. AI is three tools: - A *matchmaking process* defining the matches (which model against which) and running the model fights using a background task in the Space. - A *leaderboard* getting the match history results and displaying the models’ ELO ratings: [https://huggingface.co/spaces/huggingface-projects/AIvsAI-SoccerTwos](https://huggingface.co/spaces/huggingface-projects/AIvsAI-SoccerTwos) - A *Space demo* to visualize your agents playing against others: [https://huggingface.co/spaces/unity/ML-Agents-SoccerTwos](https://huggingface.co/spaces/unity/ML-Agents-SoccerTwos) In addition to these three tools, your classmate cyllum created a 🤗 SoccerTwos Challenge Analytics Space where you can check the detailed match results of a model: [https://huggingface.co/spaces/cyllum/soccertwos-analytics](https://huggingface.co/spaces/cyllum/soccertwos-analytics) We [wrote a blog post to explain this AI vs. AI tool in detail](https://huggingface.co/blog/aivsai), but to give you the big picture, it works this way: - Every four hours, our algorithm **fetches all the available models for a given environment (in our case ML-Agents-SoccerTwos).** - It creates a **queue of matches with the matchmaking algorithm.** - We simulate the match in a Unity headless process and **gather the match result** (1 if the first model won, 0.5 if it’s a draw, 0 if the second model won) in a Dataset. - Then, when all matches from the matches queue are done, **we update the ELO score for each model and update the leaderboard** (an illustrative sketch of this kind of ELO update is shown at the end of this hands-on). ### Competition Rules This first AI vs. AI competition **is an experiment**: the goal is to improve the tool in the future with your feedback. So some **disruptions can happen during the challenge**. But don't worry: **all the results are saved in a dataset so we can always restart the calculation correctly without losing information**. In order for your model to get correctly evaluated against others, you need to follow these rules: 1. **You can't change the observation space or action space of the agent.** By doing that your model will not work during evaluation. 2. 
You **can't use a custom trainer for now**; you need to use the Unity MLAgents ones. 3. We provide executables to train your agents. You can also use the Unity Editor if you prefer**, but to avoid bugs, we advise that you use our executables**. What will make the difference during this challenge are **the hyperparameters you choose**. We're constantly trying to improve our tutorials, so **if you find some issues in this notebook**, please [open an issue on the GitHub Repo](https://github.com/huggingface/deep-rl-class/issues). ### Chat with your classmates, share advice and ask questions on Discord - We created a new channel called `ai-vs-ai-challenge` to exchange advice and ask questions. - If you didn’t join the discord server yet, you can [join here](https://discord.gg/ydHrjt3WP5) ## Step 0: Install MLAgents and download the correct executable We advise you to use [conda](https://docs.conda.io/en/latest/) as a package manager and create a new environment. With conda, we create a new environment called rl with **Python 3.10.12**: ```bash conda create --name rl python=3.10.12 conda activate rl ``` To be able to train our agents correctly and push to the Hub, we need to install ML-Agents: ```bash git clone https://github.com/Unity-Technologies/ml-agents ``` When the cloning is done (it takes 2.63 GB), we go inside the repository and install the package: ```bash cd ml-agents pip install -e ./ml-agents-envs pip install -e ./ml-agents ``` Mac users on Apple Silicon may encounter trouble with the installation (e.g., the ONNX wheel build failing); in that case, first try to install grpcio: ```bash conda install grpcio ``` [This github issue](https://github.com/Unity-Technologies/ml-agents/issues/6019) in the official ml-agents repo might also help you. Finally, you need to install git-lfs: https://git-lfs.com/ Now that it’s installed, we need to add the environment training executable. Based on your operating system, you need to download one of them, unzip it, and place it in a new folder inside `ml-agents` that you call `training-envs-executables`. At the end, your executable should be in `ml-agents/training-envs-executables/SoccerTwos` Windows: Download [this executable](https://drive.google.com/file/d/1sqFxbEdTMubjVktnV4C6ICjp89wLhUcP/view?usp=sharing) Linux (Ubuntu): Download [this executable](https://drive.google.com/file/d/1KuqBKYiXiIcU4kNMqEzhgypuFP5_45CL/view?usp=sharing) Mac: Download [this executable](https://drive.google.com/drive/folders/1h7YB0qwjoxxghApQdEUQmk95ZwIDxrPG?usp=share_link) ⚠ For Mac, you also need to run `xattr -cr training-envs-executables/SoccerTwos/SoccerTwos.app` to be able to run SoccerTwos ## Step 1: Understand the environment The environment is called `SoccerTwos`. The Unity MLAgents Team made it. 
You can find its documentation [here](https://github.com/Unity-Technologies/ml-agents/blob/develop/docs/Learning-Environment-Examples.md#soccer-twos) The goal in this environment **is to get the ball into the opponent's goal while preventing the ball from entering your own goal.** <figure> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit10/soccertwos.gif" alt="SoccerTwos"/> <figcaption>This environment was made by the <a href="https://github.com/Unity-Technologies/ml-agents"> Unity MLAgents Team</a></figcaption> </figure> ### The reward function The reward function is: <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit10/soccerreward.png" alt="SoccerTwos Reward"/> ### The observation space The observation space is composed of vectors of size 336: - 11 ray-casts forward distributed over 120 degrees (264 state dimensions) - 3 ray-casts backward distributed over 90 degrees (72 state dimensions) - Both of these ray-casts can detect 6 objects: - Ball - Blue Goal - Purple Goal - Wall - Blue Agent - Purple Agent ### The action space The action space is three discrete branches: <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit10/socceraction.png" alt="SoccerTwos Action"/> ## Step 2: Understand MA-POCA We know how to train agents to play against others: **we can use self-play.** This is a perfect technique for a 1vs1. But in our case we’re 2vs2, and each team has 2 agents. How then can we **train cooperative behavior for groups of agents?** As explained in the [Unity Blog](https://blog.unity.com/technology/ml-agents-v20-release-now-supports-training-complex-cooperative-behaviors), agents typically receive a reward as a group (+1 - penalty) when the team scores a goal. This implies that **every agent on the team is rewarded even if each agent didn’t contribute the same to the win**, which makes it difficult to learn what to do independently. The Unity MLAgents team developed the solution in a new multi-agent trainer called *MA-POCA (Multi-Agent POsthumous Credit Assignment)*. The idea is simple but powerful: a centralized critic **processes the states of all agents in the team to estimate how well each agent is doing**. Think of this critic as a coach. This allows each agent to **make decisions based only on what it perceives locally**, and **simultaneously evaluate how good its behavior is in the context of the whole group**. <figure> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit10/mapoca.png" alt="MA POCA"/> <figcaption>This illustrates MA-POCA’s centralized learning and decentralized execution. Source: <a href="https://blog.unity.com/technology/ml-agents-plays-dodgeball">MLAgents Plays Dodgeball</a> </figcaption> </figure> The solution then is to use Self-Play with an MA-POCA trainer (called poca). The poca trainer will help us to train cooperative behavior and self-play to win against an opponent team. If you want to dive deeper into this MA-POCA algorithm, you need to read the paper they published [here](https://arxiv.org/pdf/2111.05992.pdf) and the sources we put on the additional readings section. ## Step 3: Define the config file We already learned in [Unit 5](https://huggingface.co/deep-rl-course/unit5/introduction) that in ML-Agents, you define **the training hyperparameters in `config.yaml` files.** There are multiple hyperparameters. 
To understand them better, you should read the explanations for each of them in **[the documentation](https://github.com/Unity-Technologies/ml-agents/blob/release_20_docs/docs/Training-Configuration-File.md)** The config file we’re going to use here is in `./config/poca/SoccerTwos.yaml`. It looks like this: ```yaml behaviors: SoccerTwos: trainer_type: poca hyperparameters: batch_size: 2048 buffer_size: 20480 learning_rate: 0.0003 beta: 0.005 epsilon: 0.2 lambd: 0.95 num_epoch: 3 learning_rate_schedule: constant network_settings: normalize: false hidden_units: 512 num_layers: 2 vis_encode_type: simple reward_signals: extrinsic: gamma: 0.99 strength: 1.0 keep_checkpoints: 5 max_steps: 5000000 time_horizon: 1000 summary_freq: 10000 self_play: save_steps: 50000 team_change: 200000 swap_steps: 2000 window: 10 play_against_latest_model_ratio: 0.5 initial_elo: 1200.0 ``` Compared to Pyramids or SnowballTarget, we have new hyperparameters with a self-play part. How you modify them can be critical in getting good results. The advice I can give you here is to check the explanation and recommended value for each parameter (especially the self-play ones) in **[the documentation](https://github.com/Unity-Technologies/ml-agents/blob/release_20_docs/docs/Training-Configuration-File.md).** Now that you’ve modified the config file, you’re ready to train your agents. ## Step 4: Start the training To train the agents, we need to **launch mlagents-learn and select the executable containing the environment.** We define four parameters: 1. `mlagents-learn <config>`: the path where the hyperparameter config file is. 2. `--env`: where the environment executable is. 3. `--run-id`: the name you want to give to your training run id. 4. `--no-graphics`: to not launch the visualization during the training. Depending on your hardware, 5M timesteps (the recommended value, but you can also try 10M) will take 5 to 8 hours of training. You can continue using your computer in the meantime, but I advise deactivating the computer standby mode to prevent the training from being stopped. Depending on the executable you use (Windows, Ubuntu, Mac), the training command will look like this (your executable path can be different so don’t hesitate to check before running). For Windows, it might look like this: ```bash mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.exe --run-id="SoccerTwos" --no-graphics ``` For Mac, it might look like this: ```bash mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos/SoccerTwos.app --run-id="SoccerTwos" --no-graphics ``` The executable contains 8 copies of SoccerTwos. ⚠️ It’s normal if you don’t see a big increase in ELO score (and even a decrease below 1200) before 2M timesteps, since your agents will spend most of their time moving randomly on the field before being able to score goals. ⚠️ You can stop the training with Ctrl + C, but be careful to type this command only once to stop the training, since MLAgents needs to generate a final .onnx file before closing the run. ## Step 5: **Push the agent to the Hugging Face Hub** Now that we trained our agents, we’re **ready to push them to the Hub to be able to participate in the AI vs. 
To be able to share your model with the community, there are three more steps to follow:

1️⃣ (If it's not already done) create an account on HF ➡ [https://huggingface.co/join](https://huggingface.co/join)

2️⃣ Sign in and store your authentication token from the Hugging Face website: create a new token (https://huggingface.co/settings/tokens) **with write role**.

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/notebooks/create-token.jpg" alt="Create HF Token">

Copy the token, run the following command, and paste the token when prompted:

```bash
huggingface-cli login
```

Then, we need to run `mlagents-push-to-hf` and define four parameters:

1. `--run-id`: the name of the training run id.
2. `--local-dir`: where the agent was saved. It's `results/<run_id name>`, so in our case `results/SoccerTwos`.
3. `--repo-id`: the name of the Hugging Face repo you want to create or update. It's always `<your huggingface username>/<the repo name>`. If the repo does not exist, **it will be created automatically**.
4. `--commit-message`: since HF repos are git repositories, you need to give a commit message.

In my case, the command looks like this:

```bash
mlagents-push-to-hf --run-id="SoccerTwos" --local-dir="./results/SoccerTwos" --repo-id="ThomasSimonini/poca-SoccerTwos" --commit-message="First Push"
```

For your run, fill in your own values:

```bash
mlagents-push-to-hf --run-id= # Add your run id
 --local-dir= # Your local dir
 --repo-id= # Your repo id
 --commit-message="First Push"
```

If everything worked, you should see this at the end of the process (but with a different URL 😆):

Your model is pushed to the Hub. You can view your model here: https://huggingface.co/ThomasSimonini/poca-SoccerTwos

It's the link to your model. It contains a model card that explains how to use it, your TensorBoard logs, and your config file. **What's awesome is that it's a git repository, which means you can have different commits, update your repository with a new push, etc.**

## Step 6: Verify that your model is ready for the AI vs AI Challenge

Now that your model is pushed to the Hub, **it's going to be added automatically to the AI vs AI Challenge model pool.** It can take a little bit of time before your model is added to the leaderboard, given that we do a run of matches every 4 hours.

But to ensure that everything works perfectly, you need to check:

1. That you have the `ML-Agents-SoccerTwos` tag in your model. This is the tag we use to select models to be added to the challenge pool. To do that, go to your model and check the tags.

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit10/verify1.png" alt="Verify"/>

If it's not the case, you just need to modify the readme and add it.

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit10/verify2.png" alt="Verify"/>

2. That you have a `SoccerTwos.onnx` file.

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit10/verify3.png" alt="Verify"/>

We strongly suggest that you create a new model when you push to the Hub if you want to train it again or train a new version.

## Step 7: Visualize some matches in our demo

Now that your model is part of the AI vs AI Challenge, **you can visualize how good it is compared to others**: https://huggingface.co/spaces/unity/ML-Agents-SoccerTwos

To do that, you just need to go to this demo:

- Select your model as team blue (or team purple if you prefer) and another model to compete against.
The best opponents to compare your model against are either whoever is at the top of the leaderboard or the [baseline model](https://huggingface.co/unity/MLAgents-SoccerTwos).

The matches you see live are not used in the calculation of your result, **but they are a good way to visualize how good your agent is**.

And don't hesitate to share the best score your agent gets on Discord in the #rl-i-made-this channel 🔥
# Conclusion [[conclusion]] Congrats on finishing this bonus unit! You can now sit and enjoy playing with your Huggy 🐶. And don't **forget to spread the love by sharing Huggy with your friends 🤗**. And if you share about it on social media, **please tag us @huggingface and me @simoninithomas** <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/notebooks/unit-bonus1/huggy-cover.jpeg" alt="Huggy cover" width="100%"> Finally, we would love **to hear what you think of the course and how we can improve it**. If you have some feedback then please 👉 [fill out this form](https://forms.gle/BzKXWzLAGZESGNaE9) ### Keep Learning, stay awesome 🤗
# Model Based Reinforcement Learning (MBRL)

Model-based reinforcement learning only differs from its model-free counterpart in learning a *dynamics model*, but that has substantial downstream effects on how the decisions are made.

The dynamics model usually models the environment transition dynamics, \\( s_{t+1} = f_\theta (s_t, a_t) \\), but things like inverse dynamics models (mapping from states to actions) or reward models (predicting rewards) can be used in this framework.

## Simple definition

- There is an agent that repeatedly tries to solve a problem, **accumulating state and action data**.
- With that data, the agent creates a structured learning tool, *a dynamics model*, to reason about the world.
- With the dynamics model, the agent **decides how to act by predicting the future**.
- With those actions, **the agent collects more data, improves said model, and hopefully improves future actions**.

## Academic definition

Model-based reinforcement learning (MBRL) follows the framework of an agent interacting in an environment, **learning a model of said environment**, and then **leveraging the model for control (making decisions)**.

Specifically, the agent acts in a Markov Decision Process (MDP) governed by a transition function \\( s_{t+1} = f (s_t , a_t) \\) and returns a reward at each step \\( r(s_t, a_t) \\). With a collected dataset \\( D := \\{ (s_i, a_i, s_{i+1}, r_i) \\} \\), the agent learns a model, \\( s_{t+1} = f_\theta (s_t , a_t) \\), **to minimize the negative log-likelihood of the transitions**.

We employ sample-based model-predictive control (MPC) using the learned dynamics model, which optimizes the expected reward over a finite, recursively predicted horizon, \\( \tau \\), from a set of actions sampled from a uniform distribution \\( U(a) \\) (see [paper](https://arxiv.org/pdf/2002.04523) or [paper](https://arxiv.org/pdf/2012.09156.pdf) or [paper](https://arxiv.org/pdf/2009.01221.pdf)).

## Further reading

For more information on MBRL, we recommend you check out the following resources:

- A [blog post on debugging MBRL](https://www.natolambert.com/writing/debugging-mbrl).
- A [recent review paper on MBRL](https://arxiv.org/abs/2006.16712).

## Author

This section was written by <a href="https://twitter.com/natolambert"> Nathan Lambert </a>
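To make the sample-based MPC loop described in the academic definition above concrete, here is a minimal, illustrative sketch. It is not from the original text: the dynamics model `f_theta`, the reward function `reward_fn`, and all names and shapes are assumptions used purely to illustrate a random-shooting planner over a uniform action distribution.

```python
import numpy as np

def sample_based_mpc(s0, f_theta, reward_fn, action_low, action_high,
                     horizon=15, n_candidates=1000, action_dim=2):
    """Return the first action of the best sampled action sequence.

    f_theta(s, a) -> next state (the learned dynamics model)
    reward_fn(s, a) -> scalar reward (known or learned)
    """
    # Sample candidate action sequences from a uniform distribution U(a)
    actions = np.random.uniform(action_low, action_high,
                                size=(n_candidates, horizon, action_dim))
    returns = np.zeros(n_candidates)

    for i in range(n_candidates):
        s = s0
        for t in range(horizon):
            a = actions[i, t]
            returns[i] += reward_fn(s, a)
            s = f_theta(s, a)  # recursively predict the next state with the model

    best = np.argmax(returns)
    return actions[best, 0]  # execute only the first action, then replan at the next step
```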
# Files for typos
# Instruction: https://github.com/marketplace/actions/typos-action#getting-started

[default.extend-identifiers]

[default.extend-words]
NIN="NIN" # NIN is used in scripts/convert_ncsnpp_original_checkpoint_to_diffusers.py
nd="np" # nd may be np (numpy)
parms="parms" # parms is used in scripts/convert_original_stable_diffusion_to_diffusers.py

[files]
extend-exclude = ["_typos.toml"]
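# Note: the `typos` CLI (https://github.com/crate-ci/typos), which backs the GitHub Action
# linked above, should pick up this file automatically when run from the repository root
# (for example, running `typos` after installing it with `cargo install typos-cli`).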
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # LoRA LoRA is a fast and lightweight training method that inserts and trains a significantly smaller number of parameters instead of all the model parameters. This produces a smaller file (~100 MBs) and makes it easier to quickly train a model to learn a new concept. LoRA weights are typically loaded into the denoiser, text encoder or both. The denoiser usually corresponds to a UNet ([`UNet2DConditionModel`], for example) or a Transformer ([`SD3Transformer2DModel`], for example). There are several classes for loading LoRA weights: - [`StableDiffusionLoraLoaderMixin`] provides functions for loading and unloading, fusing and unfusing, enabling and disabling, and more functions for managing LoRA weights. This class can be used with any model. - [`StableDiffusionXLLoraLoaderMixin`] is a [Stable Diffusion (SDXL)](../../api/pipelines/stable_diffusion/stable_diffusion_xl) version of the [`StableDiffusionLoraLoaderMixin`] class for loading and saving LoRA weights. It can only be used with the SDXL model. - [`SD3LoraLoaderMixin`] provides similar functions for [Stable Diffusion 3](https://huggingface.co/blog/sd3). - [`AmusedLoraLoaderMixin`] is for the [`AmusedPipeline`]. - [`LoraBaseMixin`] provides a base class with several utility methods to fuse, unfuse, unload, LoRAs and more. <Tip> To learn more about how to load LoRA weights, see the [LoRA](../../using-diffusers/loading_adapters#lora) loading guide. </Tip> ## StableDiffusionLoraLoaderMixin [[autodoc]] loaders.lora_pipeline.StableDiffusionLoraLoaderMixin ## StableDiffusionXLLoraLoaderMixin [[autodoc]] loaders.lora_pipeline.StableDiffusionXLLoraLoaderMixin ## SD3LoraLoaderMixin [[autodoc]] loaders.lora_pipeline.SD3LoraLoaderMixin ## AmusedLoraLoaderMixin [[autodoc]] loaders.lora_pipeline.AmusedLoraLoaderMixin ## LoraBaseMixin [[autodoc]] loaders.lora_base.LoraBaseMixin
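To make the loading workflow concrete, here is a minimal, illustrative sketch of loading LoRA weights into a pipeline through these mixins. The LoRA repository name and weight filename below are placeholders, not part of the original text:

```python
import torch
from diffusers import AutoPipelineForText2Image

# StableDiffusionXLLoraLoaderMixin methods are exposed directly on the SDXL pipeline
pipe = AutoPipelineForText2Image.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
).to("cuda")

# load LoRA weights into the denoiser (and text encoder, if the file includes them)
pipe.load_lora_weights("some-user/some-lora-repo", weight_name="pytorch_lora_weights.safetensors")

# optionally fuse the LoRA for faster inference, then unfuse to restore the base weights
pipe.fuse_lora()
image = pipe("a photo of a corgi wearing sunglasses").images[0]
pipe.unfuse_lora()
```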
<!--Copyright 2024 The HuggingFace Team and Tencent Hunyuan Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # HunyuanDiT2DControlNetModel HunyuanDiT2DControlNetModel is an implementation of ControlNet for [Hunyuan-DiT](https://arxiv.org/abs/2405.08748). ControlNet was introduced in [Adding Conditional Control to Text-to-Image Diffusion Models](https://huggingface.co/papers/2302.05543) by Lvmin Zhang, Anyi Rao, and Maneesh Agrawala. With a ControlNet model, you can provide an additional control image to condition and control Hunyuan-DiT generation. For example, if you provide a depth map, the ControlNet model generates an image that'll preserve the spatial information from the depth map. It is a more flexible and accurate way to control the image generation process. The abstract from the paper is: *We present ControlNet, a neural network architecture to add spatial conditioning controls to large, pretrained text-to-image diffusion models. ControlNet locks the production-ready large diffusion models, and reuses their deep and robust encoding layers pretrained with billions of images as a strong backbone to learn a diverse set of conditional controls. The neural architecture is connected with "zero convolutions" (zero-initialized convolution layers) that progressively grow the parameters from zero and ensure that no harmful noise could affect the finetuning. We test various conditioning controls, eg, edges, depth, segmentation, human pose, etc, with Stable Diffusion, using single or multiple conditions, with or without prompts. We show that the training of ControlNets is robust with small (<50k) and large (>1m) datasets. Extensive results show that ControlNet may facilitate wider applications to control image diffusion models.* This code is implemented by Tencent Hunyuan Team. You can find pre-trained checkpoints for Hunyuan-DiT ControlNets on [Tencent Hunyuan](https://huggingface.co/Tencent-Hunyuan). ## Example For Loading HunyuanDiT2DControlNetModel ```py from diffusers import HunyuanDiT2DControlNetModel import torch controlnet = HunyuanDiT2DControlNetModel.from_pretrained("Tencent-Hunyuan/HunyuanDiT-v1.1-ControlNet-Diffusers-Pose", torch_dtype=torch.float16) ``` ## HunyuanDiT2DControlNetModel [[autodoc]] HunyuanDiT2DControlNetModel
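As a follow-up to the loading example above, here is a hedged sketch of plugging the ControlNet into a Hunyuan-DiT pipeline. The pipeline class and base checkpoint name reflect common diffusers/Tencent-Hunyuan naming but are assumptions to verify against your installed version, and the control image path is a placeholder:

```python
import torch
from diffusers import HunyuanDiT2DControlNetModel, HunyuanDiTControlNetPipeline
from diffusers.utils import load_image

controlnet = HunyuanDiT2DControlNetModel.from_pretrained(
    "Tencent-Hunyuan/HunyuanDiT-v1.1-ControlNet-Diffusers-Pose", torch_dtype=torch.float16
)
pipe = HunyuanDiTControlNetPipeline.from_pretrained(
    "Tencent-Hunyuan/HunyuanDiT-v1.1-Diffusers", controlnet=controlnet, torch_dtype=torch.float16
).to("cuda")

# the control image conditions the generation (here, a pose image)
control_image = load_image("pose.png")  # placeholder path
image = pipe(
    "a person dancing on a beach at sunset",
    control_image=control_image,
    num_inference_steps=50,
).images[0]
image.save("hunyuan_controlnet.png")
```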
<!--Copyright 2024 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
-->

# Flux

Flux is a series of text-to-image generation models based on diffusion transformers. To know more about Flux, check out the original [blog post](https://blackforestlabs.ai/announcing-black-forest-labs/) by the creators of Flux, Black Forest Labs.

Original model checkpoints for Flux can be found [here](https://huggingface.co/black-forest-labs). Original inference code can be found [here](https://github.com/black-forest-labs/flux).

<Tip>

Flux can be quite expensive to run on consumer hardware devices. However, you can perform a suite of optimizations to run it faster and in a more memory-friendly manner. Check out [this section](https://huggingface.co/blog/sd3#memory-optimizations-for-sd3) for more details. Additionally, Flux can benefit from quantization for memory efficiency with a trade-off in inference latency. Refer to [this blog post](https://huggingface.co/blog/quanto-diffusers) to learn more. For an exhaustive list of resources, check out [this gist](https://gist.github.com/sayakpaul/b664605caf0aa3bf8585ab109dd5ac9c).

</Tip>

Flux comes in two variants:

* Timestep-distilled (`black-forest-labs/FLUX.1-schnell`)
* Guidance-distilled (`black-forest-labs/FLUX.1-dev`)

Both checkpoints have slightly different usage, which we detail below.

### Timestep-distilled

* `max_sequence_length` cannot be more than 256.
* `guidance_scale` needs to be 0.
* As this is a timestep-distilled model, it benefits from fewer sampling steps.

```python
import torch
from diffusers import FluxPipeline

pipe = FluxPipeline.from_pretrained("black-forest-labs/FLUX.1-schnell", torch_dtype=torch.bfloat16)
pipe.enable_model_cpu_offload()

prompt = "A cat holding a sign that says hello world"
out = pipe(
    prompt=prompt,
    guidance_scale=0.,
    height=768,
    width=1360,
    num_inference_steps=4,
    max_sequence_length=256,
).images[0]
out.save("image.png")
```

### Guidance-distilled

* The guidance-distilled variant takes about 50 sampling steps for good-quality generation.
* It doesn't have any limitations around the `max_sequence_length`.

```python
import torch
from diffusers import FluxPipeline

pipe = FluxPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16)
pipe.enable_model_cpu_offload()

prompt = "a tiny astronaut hatching from an egg on the moon"
out = pipe(
    prompt=prompt,
    guidance_scale=3.5,
    height=768,
    width=1360,
    num_inference_steps=50,
).images[0]
out.save("image.png")
```

## Running FP16 inference

Flux can generate high-quality images with FP16 (i.e. to accelerate inference on Turing/Volta GPUs) but produces different outputs compared to FP32/BF16. The issue is that some activations in the text encoders have to be clipped when running in FP16, which affects the overall image. Forcing text encoders to run with FP32 inference thus removes this output difference. See [here](https://github.com/huggingface/diffusers/pull/9097#issuecomment-2272292516) for details.
FP16 inference code: ```python import torch from diffusers import FluxPipeline pipe = FluxPipeline.from_pretrained("black-forest-labs/FLUX.1-schnell", torch_dtype=torch.bfloat16) # can replace schnell with dev # to run on low vram GPUs (i.e. between 4 and 32 GB VRAM) pipe.enable_sequential_cpu_offload() pipe.vae.enable_slicing() pipe.vae.enable_tiling() pipe.to(torch.float16) # casting here instead of in the pipeline constructor because doing so in the constructor loads all models into CPU memory at once prompt = "A cat holding a sign that says hello world" out = pipe( prompt=prompt, guidance_scale=0., height=768, width=1360, num_inference_steps=4, max_sequence_length=256, ).images[0] out.save("image.png") ``` ## Single File Loading for the `FluxTransformer2DModel` The `FluxTransformer2DModel` supports loading checkpoints in the original format shipped by Black Forest Labs. This is also useful when trying to load finetunes or quantized versions of the models that have been published by the community. <Tip> `FP8` inference can be brittle depending on the GPU type, CUDA version, and `torch` version that you are using. It is recommended that you use the `optimum-quanto` library in order to run FP8 inference on your machine. </Tip> The following example demonstrates how to run Flux with less than 16GB of VRAM. First install `optimum-quanto` ```shell pip install optimum-quanto ``` Then run the following example ```python import torch from diffusers import FluxTransformer2DModel, FluxPipeline from transformers import T5EncoderModel, CLIPTextModel from optimum.quanto import freeze, qfloat8, quantize bfl_repo = "black-forest-labs/FLUX.1-dev" dtype = torch.bfloat16 transformer = FluxTransformer2DModel.from_single_file("https://huggingface.co/Kijai/flux-fp8/blob/main/flux1-dev-fp8.safetensors", torch_dtype=dtype) quantize(transformer, weights=qfloat8) freeze(transformer) text_encoder_2 = T5EncoderModel.from_pretrained(bfl_repo, subfolder="text_encoder_2", torch_dtype=dtype) quantize(text_encoder_2, weights=qfloat8) freeze(text_encoder_2) pipe = FluxPipeline.from_pretrained(bfl_repo, transformer=None, text_encoder_2=None, torch_dtype=dtype) pipe.transformer = transformer pipe.text_encoder_2 = text_encoder_2 pipe.enable_model_cpu_offload() prompt = "A cat holding a sign that says hello world" image = pipe( prompt, guidance_scale=3.5, output_type="pil", num_inference_steps=20, generator=torch.Generator("cpu").manual_seed(0) ).images[0] image.save("flux-fp8-dev.png") ``` ## FluxPipeline [[autodoc]] FluxPipeline - all - __call__
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Paint by Example [Paint by Example: Exemplar-based Image Editing with Diffusion Models](https://huggingface.co/papers/2211.13227) is by Binxin Yang, Shuyang Gu, Bo Zhang, Ting Zhang, Xuejin Chen, Xiaoyan Sun, Dong Chen, Fang Wen. The abstract from the paper is: *Language-guided image editing has achieved great success recently. In this paper, for the first time, we investigate exemplar-guided image editing for more precise control. We achieve this goal by leveraging self-supervised training to disentangle and re-organize the source image and the exemplar. However, the naive approach will cause obvious fusing artifacts. We carefully analyze it and propose an information bottleneck and strong augmentations to avoid the trivial solution of directly copying and pasting the exemplar image. Meanwhile, to ensure the controllability of the editing process, we design an arbitrary shape mask for the exemplar image and leverage the classifier-free guidance to increase the similarity to the exemplar image. The whole framework involves a single forward of the diffusion model without any iterative optimization. We demonstrate that our method achieves an impressive performance and enables controllable editing on in-the-wild images with high fidelity.* The original codebase can be found at [Fantasy-Studio/Paint-by-Example](https://github.com/Fantasy-Studio/Paint-by-Example), and you can try it out in a [demo](https://huggingface.co/spaces/Fantasy-Studio/Paint-by-Example). ## Tips Paint by Example is supported by the official [Fantasy-Studio/Paint-by-Example](https://huggingface.co/Fantasy-Studio/Paint-by-Example) checkpoint. The checkpoint is warm-started from [CompVis/stable-diffusion-v1-4](https://huggingface.co/CompVis/stable-diffusion-v1-4) to inpaint partly masked images conditioned on example and reference images. <Tip> Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-components-across-pipelines) section to learn how to efficiently load the same components into multiple pipelines. </Tip> ## PaintByExamplePipeline [[autodoc]] PaintByExamplePipeline - all - __call__ ## StableDiffusionPipelineOutput [[autodoc]] pipelines.stable_diffusion.StableDiffusionPipelineOutput
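To ground the description above, here is a minimal, illustrative sketch of exemplar-guided inpainting with the official checkpoint. The image paths are placeholders rather than part of the original text:

```python
import torch
from diffusers import PaintByExamplePipeline
from diffusers.utils import load_image

pipe = PaintByExamplePipeline.from_pretrained(
    "Fantasy-Studio/Paint-by-Example", torch_dtype=torch.float16
).to("cuda")

init_image = load_image("scene.png")       # image to edit (placeholder path)
mask_image = load_image("scene_mask.png")  # white pixels mark the region to replace
example_image = load_image("example.png")  # exemplar image that guides what to paint in

image = pipe(
    image=init_image,
    mask_image=mask_image,
    example_image=example_image,
    num_inference_steps=50,
    guidance_scale=5.0,
).images[0]
image.save("paint_by_example.png")
```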
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Inpainting The Stable Diffusion model can also be applied to inpainting which lets you edit specific parts of an image by providing a mask and a text prompt using Stable Diffusion. ## Tips It is recommended to use this pipeline with checkpoints that have been specifically fine-tuned for inpainting, such as [runwayml/stable-diffusion-inpainting](https://huggingface.co/runwayml/stable-diffusion-inpainting). Default text-to-image Stable Diffusion checkpoints, such as [runwayml/stable-diffusion-v1-5](https://huggingface.co/runwayml/stable-diffusion-v1-5) are also compatible but they might be less performant. <Tip> Make sure to check out the Stable Diffusion [Tips](overview#tips) section to learn how to explore the tradeoff between scheduler speed and quality, and how to reuse pipeline components efficiently! If you're interested in using one of the official checkpoints for a task, explore the [CompVis](https://huggingface.co/CompVis), [Runway](https://huggingface.co/runwayml), and [Stability AI](https://huggingface.co/stabilityai) Hub organizations! </Tip> ## StableDiffusionInpaintPipeline [[autodoc]] StableDiffusionInpaintPipeline - all - __call__ - enable_attention_slicing - disable_attention_slicing - enable_xformers_memory_efficient_attention - disable_xformers_memory_efficient_attention - load_textual_inversion - load_lora_weights - save_lora_weights ## StableDiffusionPipelineOutput [[autodoc]] pipelines.stable_diffusion.StableDiffusionPipelineOutput ## FlaxStableDiffusionInpaintPipeline [[autodoc]] FlaxStableDiffusionInpaintPipeline - all - __call__ ## FlaxStableDiffusionPipelineOutput [[autodoc]] pipelines.stable_diffusion.FlaxStableDiffusionPipelineOutput
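For context, here is a minimal, illustrative usage sketch with the inpainting-finetuned checkpoint mentioned above; the prompt and image paths are placeholders:

```python
import torch
from diffusers import StableDiffusionInpaintPipeline
from diffusers.utils import load_image

pipe = StableDiffusionInpaintPipeline.from_pretrained(
    "runwayml/stable-diffusion-inpainting", torch_dtype=torch.float16
).to("cuda")

init_image = load_image("bench.png")       # placeholder path
mask_image = load_image("bench_mask.png")  # white pixels = area to repaint

image = pipe(
    prompt="Face of a yellow cat, high resolution, sitting on a park bench",
    image=init_image,
    mask_image=mask_image,
).images[0]
image.save("inpainted.png")
```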
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # DeepCache [DeepCache](https://huggingface.co/papers/2312.00858) accelerates [`StableDiffusionPipeline`] and [`StableDiffusionXLPipeline`] by strategically caching and reusing high-level features while efficiently updating low-level features by taking advantage of the U-Net architecture. Start by installing [DeepCache](https://github.com/horseee/DeepCache): ```bash pip install DeepCache ``` Then load and enable the [`DeepCacheSDHelper`](https://github.com/horseee/DeepCache#usage): ```diff import torch from diffusers import StableDiffusionPipeline pipe = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5', torch_dtype=torch.float16).to("cuda") + from DeepCache import DeepCacheSDHelper + helper = DeepCacheSDHelper(pipe=pipe) + helper.set_params( + cache_interval=3, + cache_branch_id=0, + ) + helper.enable() image = pipe("a photo of an astronaut on a moon").images[0] ``` The `set_params` method accepts two arguments: `cache_interval` and `cache_branch_id`. `cache_interval` means the frequency of feature caching, specified as the number of steps between each cache operation. `cache_branch_id` identifies which branch of the network (ordered from the shallowest to the deepest layer) is responsible for executing the caching processes. Opting for a lower `cache_branch_id` or a larger `cache_interval` can lead to faster inference speed at the expense of reduced image quality (ablation experiments of these two hyperparameters can be found in the [paper](https://arxiv.org/abs/2312.00858)). Once those arguments are set, use the `enable` or `disable` methods to activate or deactivate the `DeepCacheSDHelper`. <div class="flex justify-center"> <img src="https://github.com/horseee/Diffusion_DeepCache/raw/master/static/images/example.png"> </div> You can find more generated samples (original pipeline vs DeepCache) and the corresponding inference latency in the [WandB report](https://wandb.ai/horseee/DeepCache/runs/jwlsqqgt?workspace=user-horseee). The prompts are randomly selected from the [MS-COCO 2017](https://cocodataset.org/#home) dataset. ## Benchmark We tested how much faster DeepCache accelerates [Stable Diffusion v2.1](https://huggingface.co/stabilityai/stable-diffusion-2-1) with 50 inference steps on an NVIDIA RTX A5000, using different configurations for resolution, batch size, cache interval (I), and cache branch (B). 
| **Resolution** | **Batch size** | **Original** | **DeepCache(I=3, B=0)** | **DeepCache(I=5, B=0)** | **DeepCache(I=5, B=1)** |
|----------------|----------------|--------------|-------------------------|-------------------------|-------------------------|
| 512            | 8              | 15.96        | 6.88(2.32x)             | 5.03(3.18x)             | 7.27(2.20x)             |
|                | 4              | 8.39         | 3.60(2.33x)             | 2.62(3.21x)             | 3.75(2.24x)             |
|                | 1              | 2.61         | 1.12(2.33x)             | 0.81(3.24x)             | 1.11(2.35x)             |
| 768            | 8              | 43.58        | 18.99(2.29x)            | 13.96(3.12x)            | 21.27(2.05x)            |
|                | 4              | 22.24        | 9.67(2.30x)             | 7.10(3.13x)             | 10.74(2.07x)            |
|                | 1              | 6.33         | 2.72(2.33x)             | 1.97(3.21x)             | 2.98(2.12x)             |
| 1024           | 8              | 101.95       | 45.57(2.24x)            | 33.72(3.02x)            | 53.00(1.92x)            |
|                | 4              | 49.25        | 21.86(2.25x)            | 16.19(3.04x)            | 25.78(1.91x)            |
|                | 1              | 13.83        | 6.07(2.28x)             | 4.43(3.12x)             | 7.15(1.93x)             |
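Since the helper also targets [`StableDiffusionXLPipeline`], the same pattern applies to SDXL. Here is a hedged sketch; the SDXL checkpoint name is the commonly used one, but verify it (and the DeepCache version) for your setup:

```python
import torch
from diffusers import StableDiffusionXLPipeline
from DeepCache import DeepCacheSDHelper

pipe = StableDiffusionXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
).to("cuda")

# same helper API as in the Stable Diffusion example above
helper = DeepCacheSDHelper(pipe=pipe)
helper.set_params(cache_interval=3, cache_branch_id=0)
helper.enable()

image = pipe("a photo of an astronaut on a moon").images[0]

helper.disable()  # restore the original (non-cached) forward passes
```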
<!--Copyright 2024 Custom Diffusion authors The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Custom Diffusion [Custom Diffusion](https://huggingface.co/papers/2212.04488) is a training technique for personalizing image generation models. Like Textual Inversion, DreamBooth, and LoRA, Custom Diffusion only requires a few (~4-5) example images. This technique works by only training weights in the cross-attention layers, and it uses a special word to represent the newly learned concept. Custom Diffusion is unique because it can also learn multiple concepts at the same time. If you're training on a GPU with limited vRAM, you should try enabling xFormers with `--enable_xformers_memory_efficient_attention` for faster training with lower vRAM requirements (16GB). To save even more memory, add `--set_grads_to_none` in the training argument to set the gradients to `None` instead of zero (this option can cause some issues, so if you experience any, try removing this parameter). This guide will explore the [train_custom_diffusion.py](https://github.com/huggingface/diffusers/blob/main/examples/custom_diffusion/train_custom_diffusion.py) script to help you become more familiar with it, and how you can adapt it for your own use-case. Before running the script, make sure you install the library from source: ```bash git clone https://github.com/huggingface/diffusers cd diffusers pip install . ``` Navigate to the example folder with the training script and install the required dependencies: ```bash cd examples/custom_diffusion pip install -r requirements.txt pip install clip-retrieval ``` <Tip> 🤗 Accelerate is a library for helping you train on multiple GPUs/TPUs or with mixed-precision. It'll automatically configure your training setup based on your hardware and environment. Take a look at the 🤗 Accelerate [Quick tour](https://huggingface.co/docs/accelerate/quicktour) to learn more. </Tip> Initialize an 🤗 Accelerate environment: ```bash accelerate config ``` To setup a default 🤗 Accelerate environment without choosing any configurations: ```bash accelerate config default ``` Or if your environment doesn't support an interactive shell, like a notebook, you can use: ```py from accelerate.utils import write_basic_config write_basic_config() ``` Lastly, if you want to train a model on your own dataset, take a look at the [Create a dataset for training](create_dataset) guide to learn how to create a dataset that works with the training script. <Tip> The following sections highlight parts of the training script that are important for understanding how to modify it, but it doesn't cover every aspect of the script in detail. If you're interested in learning more, feel free to read through the [script](https://github.com/huggingface/diffusers/blob/main/examples/custom_diffusion/train_custom_diffusion.py) and let us know if you have any questions or concerns. </Tip> ## Script parameters The training script contains all the parameters to help you customize your training run. 
These are found in the [`parse_args()`](https://github.com/huggingface/diffusers/blob/64603389da01082055a901f2883c4810d1144edb/examples/custom_diffusion/train_custom_diffusion.py#L319) function. The function comes with default values, but you can also set your own values in the training command if you'd like. For example, to change the resolution of the input image: ```bash accelerate launch train_custom_diffusion.py \ --resolution=256 ``` Many of the basic parameters are described in the [DreamBooth](dreambooth#script-parameters) training guide, so this guide focuses on the parameters unique to Custom Diffusion: - `--freeze_model`: freezes the key and value parameters in the cross-attention layer; the default is `crossattn_kv`, but you can set it to `crossattn` to train all the parameters in the cross-attention layer - `--concepts_list`: to learn multiple concepts, provide a path to a JSON file containing the concepts - `--modifier_token`: a special word used to represent the learned concept - `--initializer_token`: a special word used to initialize the embeddings of the `modifier_token` ### Prior preservation loss Prior preservation loss is a method that uses a model's own generated samples to help it learn how to generate more diverse images. Because these generated sample images belong to the same class as the images you provided, they help the model retain what it has learned about the class and how it can use what it already knows about the class to make new compositions. Many of the parameters for prior preservation loss are described in the [DreamBooth](dreambooth#prior-preservation-loss) training guide. ### Regularization Custom Diffusion includes training the target images with a small set of real images to prevent overfitting. As you can imagine, this can be easy to do when you're only training on a few images! Download 200 real images with `clip_retrieval`. The `class_prompt` should be the same category as the target images. These images are stored in `class_data_dir`. ```bash python retrieve.py --class_prompt cat --class_data_dir real_reg/samples_cat --num_class_images 200 ``` To enable regularization, add the following parameters: - `--with_prior_preservation`: whether to use prior preservation loss - `--prior_loss_weight`: controls the influence of the prior preservation loss on the model - `--real_prior`: whether to use a small set of real images to prevent overfitting ```bash accelerate launch train_custom_diffusion.py \ --with_prior_preservation \ --prior_loss_weight=1.0 \ --class_data_dir="./real_reg/samples_cat" \ --class_prompt="cat" \ --real_prior=True \ ``` ## Training script <Tip> A lot of the code in the Custom Diffusion training script is similar to the [DreamBooth](dreambooth#training-script) script. This guide instead focuses on the code that is relevant to Custom Diffusion. 
</Tip> The Custom Diffusion training script has two dataset classes: - [`CustomDiffusionDataset`](https://github.com/huggingface/diffusers/blob/64603389da01082055a901f2883c4810d1144edb/examples/custom_diffusion/train_custom_diffusion.py#L165): preprocesses the images, class images, and prompts for training - [`PromptDataset`](https://github.com/huggingface/diffusers/blob/64603389da01082055a901f2883c4810d1144edb/examples/custom_diffusion/train_custom_diffusion.py#L148): prepares the prompts for generating class images Next, the `modifier_token` is [added to the tokenizer](https://github.com/huggingface/diffusers/blob/64603389da01082055a901f2883c4810d1144edb/examples/custom_diffusion/train_custom_diffusion.py#L811), converted to token ids, and the token embeddings are resized to account for the new `modifier_token`. Then the `modifier_token` embeddings are initialized with the embeddings of the `initializer_token`. All parameters in the text encoder are frozen, except for the token embeddings since this is what the model is trying to learn to associate with the concepts. ```py params_to_freeze = itertools.chain( text_encoder.text_model.encoder.parameters(), text_encoder.text_model.final_layer_norm.parameters(), text_encoder.text_model.embeddings.position_embedding.parameters(), ) freeze_params(params_to_freeze) ``` Now you'll need to add the [Custom Diffusion weights](https://github.com/huggingface/diffusers/blob/64603389da01082055a901f2883c4810d1144edb/examples/custom_diffusion/train_custom_diffusion.py#L911C3-L911C3) to the attention layers. This is a really important step for getting the shape and size of the attention weights correct, and for setting the appropriate number of attention processors in each UNet block. ```py st = unet.state_dict() for name, _ in unet.attn_processors.items(): cross_attention_dim = None if name.endswith("attn1.processor") else unet.config.cross_attention_dim if name.startswith("mid_block"): hidden_size = unet.config.block_out_channels[-1] elif name.startswith("up_blocks"): block_id = int(name[len("up_blocks.")]) hidden_size = list(reversed(unet.config.block_out_channels))[block_id] elif name.startswith("down_blocks"): block_id = int(name[len("down_blocks.")]) hidden_size = unet.config.block_out_channels[block_id] layer_name = name.split(".processor")[0] weights = { "to_k_custom_diffusion.weight": st[layer_name + ".to_k.weight"], "to_v_custom_diffusion.weight": st[layer_name + ".to_v.weight"], } if train_q_out: weights["to_q_custom_diffusion.weight"] = st[layer_name + ".to_q.weight"] weights["to_out_custom_diffusion.0.weight"] = st[layer_name + ".to_out.0.weight"] weights["to_out_custom_diffusion.0.bias"] = st[layer_name + ".to_out.0.bias"] if cross_attention_dim is not None: custom_diffusion_attn_procs[name] = attention_class( train_kv=train_kv, train_q_out=train_q_out, hidden_size=hidden_size, cross_attention_dim=cross_attention_dim, ).to(unet.device) custom_diffusion_attn_procs[name].load_state_dict(weights) else: custom_diffusion_attn_procs[name] = attention_class( train_kv=False, train_q_out=False, hidden_size=hidden_size, cross_attention_dim=cross_attention_dim, ) del st unet.set_attn_processor(custom_diffusion_attn_procs) custom_diffusion_layers = AttnProcsLayers(unet.attn_processors) ``` The [optimizer](https://github.com/huggingface/diffusers/blob/84cd9e8d01adb47f046b1ee449fc76a0c32dc4e2/examples/custom_diffusion/train_custom_diffusion.py#L982) is initialized to update the cross-attention layer parameters: ```py optimizer = optimizer_class( 
itertools.chain(text_encoder.get_input_embeddings().parameters(), custom_diffusion_layers.parameters()) if args.modifier_token is not None else custom_diffusion_layers.parameters(), lr=args.learning_rate, betas=(args.adam_beta1, args.adam_beta2), weight_decay=args.adam_weight_decay, eps=args.adam_epsilon, ) ``` In the [training loop](https://github.com/huggingface/diffusers/blob/84cd9e8d01adb47f046b1ee449fc76a0c32dc4e2/examples/custom_diffusion/train_custom_diffusion.py#L1048), it is important to only update the embeddings for the concept you're trying to learn. This means setting the gradients of all the other token embeddings to zero: ```py if args.modifier_token is not None: if accelerator.num_processes > 1: grads_text_encoder = text_encoder.module.get_input_embeddings().weight.grad else: grads_text_encoder = text_encoder.get_input_embeddings().weight.grad index_grads_to_zero = torch.arange(len(tokenizer)) != modifier_token_id[0] for i in range(len(modifier_token_id[1:])): index_grads_to_zero = index_grads_to_zero & ( torch.arange(len(tokenizer)) != modifier_token_id[i] ) grads_text_encoder.data[index_grads_to_zero, :] = grads_text_encoder.data[ index_grads_to_zero, : ].fill_(0) ``` ## Launch the script Once you’ve made all your changes or you’re okay with the default configuration, you’re ready to launch the training script! 🚀 In this guide, you'll download and use these example [cat images](https://www.cs.cmu.edu/~custom-diffusion/assets/data.zip). You can also create and use your own dataset if you want (see the [Create a dataset for training](create_dataset) guide). Set the environment variable `MODEL_NAME` to a model id on the Hub or a path to a local model, `INSTANCE_DIR` to the path where you just downloaded the cat images to, and `OUTPUT_DIR` to where you want to save the model. You'll use `<new1>` as the special word to tie the newly learned embeddings to. The script creates and saves model checkpoints and a pytorch_custom_diffusion_weights.bin file to your repository. To monitor training progress with Weights and Biases, add the `--report_to=wandb` parameter to the training command and specify a validation prompt with `--validation_prompt`. This is useful for debugging and saving intermediate results. 
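If you haven't downloaded the example cat images yet, here is a minimal sketch for fetching and unpacking them (assuming `wget` and `unzip` are available; the exact folder layout inside the zip may differ, so adjust `INSTANCE_DIR` accordingly):

```bash
wget https://www.cs.cmu.edu/~custom-diffusion/assets/data.zip
unzip data.zip -d ./data
# the cat images should now be under ./data (point INSTANCE_DIR at that folder)
```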
<Tip> If you're training on human faces, the Custom Diffusion team has found the following parameters to work well: - `--learning_rate=5e-6` - `--max_train_steps` can be anywhere between 1000 and 2000 - `--freeze_model=crossattn` - use at least 15-20 images to train with </Tip> <hfoptions id="training-inference"> <hfoption id="single concept"> ```bash export MODEL_NAME="CompVis/stable-diffusion-v1-4" export OUTPUT_DIR="path-to-save-model" export INSTANCE_DIR="./data/cat" accelerate launch train_custom_diffusion.py \ --pretrained_model_name_or_path=$MODEL_NAME \ --instance_data_dir=$INSTANCE_DIR \ --output_dir=$OUTPUT_DIR \ --class_data_dir=./real_reg/samples_cat/ \ --with_prior_preservation \ --real_prior \ --prior_loss_weight=1.0 \ --class_prompt="cat" \ --num_class_images=200 \ --instance_prompt="photo of a <new1> cat" \ --resolution=512 \ --train_batch_size=2 \ --learning_rate=1e-5 \ --lr_warmup_steps=0 \ --max_train_steps=250 \ --scale_lr \ --hflip \ --modifier_token "<new1>" \ --validation_prompt="<new1> cat sitting in a bucket" \ --report_to="wandb" \ --push_to_hub ``` </hfoption> <hfoption id="multiple concepts"> Custom Diffusion can also learn multiple concepts if you provide a [JSON](https://github.com/adobe-research/custom-diffusion/blob/main/assets/concept_list.json) file with some details about each concept it should learn. Run clip-retrieval to collect some real images to use for regularization: ```bash pip install clip-retrieval python retrieve.py --class_prompt {} --class_data_dir {} --num_class_images 200 ``` Then you can launch the script: ```bash export MODEL_NAME="CompVis/stable-diffusion-v1-4" export OUTPUT_DIR="path-to-save-model" accelerate launch train_custom_diffusion.py \ --pretrained_model_name_or_path=$MODEL_NAME \ --output_dir=$OUTPUT_DIR \ --concepts_list=./concept_list.json \ --with_prior_preservation \ --real_prior \ --prior_loss_weight=1.0 \ --resolution=512 \ --train_batch_size=2 \ --learning_rate=1e-5 \ --lr_warmup_steps=0 \ --max_train_steps=500 \ --num_class_images=200 \ --scale_lr \ --hflip \ --modifier_token "<new1>+<new2>" \ --push_to_hub ``` </hfoption> </hfoptions> Once training is finished, you can use your new Custom Diffusion model for inference. 
<hfoptions id="training-inference">
<hfoption id="single concept">

```py
import torch
from diffusers import DiffusionPipeline

pipeline = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    torch_dtype=torch.float16,
).to("cuda")
pipeline.unet.load_attn_procs("path-to-save-model", weight_name="pytorch_custom_diffusion_weights.bin")
pipeline.load_textual_inversion("path-to-save-model", weight_name="<new1>.bin")

image = pipeline(
    "<new1> cat sitting in a bucket",
    num_inference_steps=100,
    guidance_scale=6.0,
    eta=1.0,
).images[0]
image.save("cat.png")
```

</hfoption>
<hfoption id="multiple concepts">

```py
import torch
from diffusers import DiffusionPipeline

# repository that holds the multi-concept Custom Diffusion weights
model_id = "sayakpaul/custom-diffusion-cat-wooden-pot"

# load the base model the weights were trained on, then attach the Custom Diffusion weights
pipeline = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16
).to("cuda")
pipeline.unet.load_attn_procs(model_id, weight_name="pytorch_custom_diffusion_weights.bin")
pipeline.load_textual_inversion(model_id, weight_name="<new1>.bin")
pipeline.load_textual_inversion(model_id, weight_name="<new2>.bin")

image = pipeline(
    "the <new1> cat sculpture in the style of a <new2> wooden pot",
    num_inference_steps=100,
    guidance_scale=6.0,
    eta=1.0,
).images[0]
image.save("multi-subject.png")
```

</hfoption>
</hfoptions>

## Next steps

Congratulations on training a model with Custom Diffusion! 🎉 To learn more:

- Read the [Multi-Concept Customization of Text-to-Image Diffusion](https://www.cs.cmu.edu/~custom-diffusion/) blog post to learn more details about the experimental results from the Custom Diffusion team.
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> [[open-in-colab]] # Train a diffusion model Unconditional image generation is a popular application of diffusion models that generates images that look like those in the dataset used for training. Typically, the best results are obtained from finetuning a pretrained model on a specific dataset. You can find many of these checkpoints on the [Hub](https://huggingface.co/search/full-text?q=unconditional-image-generation&type=model), but if you can't find one you like, you can always train your own! This tutorial will teach you how to train a [`UNet2DModel`] from scratch on a subset of the [Smithsonian Butterflies](https://huggingface.co/datasets/huggan/smithsonian_butterflies_subset) dataset to generate your own 🦋 butterflies 🦋. <Tip> 💡 This training tutorial is based on the [Training with 🧨 Diffusers](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/training_example.ipynb) notebook. For additional details and context about diffusion models like how they work, check out the notebook! </Tip> Before you begin, make sure you have 🤗 Datasets installed to load and preprocess image datasets, and 🤗 Accelerate, to simplify training on any number of GPUs. The following command will also install [TensorBoard](https://www.tensorflow.org/tensorboard) to visualize training metrics (you can also use [Weights & Biases](https://docs.wandb.ai/) to track your training). ```py # uncomment to install the necessary libraries in Colab #!pip install diffusers[training] ``` We encourage you to share your model with the community, and in order to do that, you'll need to login to your Hugging Face account (create one [here](https://hf.co/join) if you don't already have one!). You can login from a notebook and enter your token when prompted. Make sure your token has the write role. ```py >>> from huggingface_hub import notebook_login >>> notebook_login() ``` Or login in from the terminal: ```bash huggingface-cli login ``` Since the model checkpoints are quite large, install [Git-LFS](https://git-lfs.com/) to version these large files: ```bash !sudo apt -qq install git-lfs !git config --global credential.helper store ``` ## Training configuration For convenience, create a `TrainingConfig` class containing the training hyperparameters (feel free to adjust them): ```py >>> from dataclasses import dataclass >>> @dataclass ... class TrainingConfig: ... image_size = 128 # the generated image resolution ... train_batch_size = 16 ... eval_batch_size = 16 # how many images to sample during evaluation ... num_epochs = 50 ... gradient_accumulation_steps = 1 ... learning_rate = 1e-4 ... lr_warmup_steps = 500 ... save_image_epochs = 10 ... save_model_epochs = 30 ... mixed_precision = "fp16" # `no` for float32, `fp16` for automatic mixed precision ... output_dir = "ddpm-butterflies-128" # the model name locally and on the HF Hub ... push_to_hub = True # whether to upload the saved model to the HF Hub ... 
hub_model_id = "<your-username>/<my-awesome-model>" # the name of the repository to create on the HF Hub ... hub_private_repo = False ... overwrite_output_dir = True # overwrite the old model when re-running the notebook ... seed = 0 >>> config = TrainingConfig() ``` ## Load the dataset You can easily load the [Smithsonian Butterflies](https://huggingface.co/datasets/huggan/smithsonian_butterflies_subset) dataset with the 🤗 Datasets library: ```py >>> from datasets import load_dataset >>> config.dataset_name = "huggan/smithsonian_butterflies_subset" >>> dataset = load_dataset(config.dataset_name, split="train") ``` <Tip> 💡 You can find additional datasets from the [HugGan Community Event](https://huggingface.co/huggan) or you can use your own dataset by creating a local [`ImageFolder`](https://huggingface.co/docs/datasets/image_dataset#imagefolder). Set `config.dataset_name` to the repository id of the dataset if it is from the HugGan Community Event, or `imagefolder` if you're using your own images. </Tip> 🤗 Datasets uses the [`~datasets.Image`] feature to automatically decode the image data and load it as a [`PIL.Image`](https://pillow.readthedocs.io/en/stable/reference/Image.html) which we can visualize: ```py >>> import matplotlib.pyplot as plt >>> fig, axs = plt.subplots(1, 4, figsize=(16, 4)) >>> for i, image in enumerate(dataset[:4]["image"]): ... axs[i].imshow(image) ... axs[i].set_axis_off() >>> fig.show() ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/butterflies_ds.png"/> </div> The images are all different sizes though, so you'll need to preprocess them first: * `Resize` changes the image size to the one defined in `config.image_size`. * `RandomHorizontalFlip` augments the dataset by randomly mirroring the images. * `Normalize` is important to rescale the pixel values into a [-1, 1] range, which is what the model expects. ```py >>> from torchvision import transforms >>> preprocess = transforms.Compose( ... [ ... transforms.Resize((config.image_size, config.image_size)), ... transforms.RandomHorizontalFlip(), ... transforms.ToTensor(), ... transforms.Normalize([0.5], [0.5]), ... ] ... ) ``` Use 🤗 Datasets' [`~datasets.Dataset.set_transform`] method to apply the `preprocess` function on the fly during training: ```py >>> def transform(examples): ... images = [preprocess(image.convert("RGB")) for image in examples["image"]] ... return {"images": images} >>> dataset.set_transform(transform) ``` Feel free to visualize the images again to confirm that they've been resized. Now you're ready to wrap the dataset in a [DataLoader](https://pytorch.org/docs/stable/data#torch.utils.data.DataLoader) for training! ```py >>> import torch >>> train_dataloader = torch.utils.data.DataLoader(dataset, batch_size=config.train_batch_size, shuffle=True) ``` ## Create a UNet2DModel Pretrained models in 🧨 Diffusers are easily created from their model class with the parameters you want. For example, to create a [`UNet2DModel`]: ```py >>> from diffusers import UNet2DModel >>> model = UNet2DModel( ... sample_size=config.image_size, # the target image resolution ... in_channels=3, # the number of input channels, 3 for RGB images ... out_channels=3, # the number of output channels ... layers_per_block=2, # how many ResNet layers to use per UNet block ... block_out_channels=(128, 128, 256, 256, 512, 512), # the number of output channels for each UNet block ... down_block_types=( ... 
"DownBlock2D", # a regular ResNet downsampling block ... "DownBlock2D", ... "DownBlock2D", ... "DownBlock2D", ... "AttnDownBlock2D", # a ResNet downsampling block with spatial self-attention ... "DownBlock2D", ... ), ... up_block_types=( ... "UpBlock2D", # a regular ResNet upsampling block ... "AttnUpBlock2D", # a ResNet upsampling block with spatial self-attention ... "UpBlock2D", ... "UpBlock2D", ... "UpBlock2D", ... "UpBlock2D", ... ), ... ) ``` It is often a good idea to quickly check the sample image shape matches the model output shape: ```py >>> sample_image = dataset[0]["images"].unsqueeze(0) >>> print("Input shape:", sample_image.shape) Input shape: torch.Size([1, 3, 128, 128]) >>> print("Output shape:", model(sample_image, timestep=0).sample.shape) Output shape: torch.Size([1, 3, 128, 128]) ``` Great! Next, you'll need a scheduler to add some noise to the image. ## Create a scheduler The scheduler behaves differently depending on whether you're using the model for training or inference. During inference, the scheduler generates image from the noise. During training, the scheduler takes a model output - or a sample - from a specific point in the diffusion process and applies noise to the image according to a *noise schedule* and an *update rule*. Let's take a look at the [`DDPMScheduler`] and use the `add_noise` method to add some random noise to the `sample_image` from before: ```py >>> import torch >>> from PIL import Image >>> from diffusers import DDPMScheduler >>> noise_scheduler = DDPMScheduler(num_train_timesteps=1000) >>> noise = torch.randn(sample_image.shape) >>> timesteps = torch.LongTensor([50]) >>> noisy_image = noise_scheduler.add_noise(sample_image, noise, timesteps) >>> Image.fromarray(((noisy_image.permute(0, 2, 3, 1) + 1.0) * 127.5).type(torch.uint8).numpy()[0]) ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/noisy_butterfly.png"/> </div> The training objective of the model is to predict the noise added to the image. The loss at this step can be calculated by: ```py >>> import torch.nn.functional as F >>> noise_pred = model(noisy_image, timesteps).sample >>> loss = F.mse_loss(noise_pred, noise) ``` ## Train the model By now, you have most of the pieces to start training the model and all that's left is putting everything together. First, you'll need an optimizer and a learning rate scheduler: ```py >>> from diffusers.optimization import get_cosine_schedule_with_warmup >>> optimizer = torch.optim.AdamW(model.parameters(), lr=config.learning_rate) >>> lr_scheduler = get_cosine_schedule_with_warmup( ... optimizer=optimizer, ... num_warmup_steps=config.lr_warmup_steps, ... num_training_steps=(len(train_dataloader) * config.num_epochs), ... ) ``` Then, you'll need a way to evaluate the model. For evaluation, you can use the [`DDPMPipeline`] to generate a batch of sample images and save it as a grid: ```py >>> from diffusers import DDPMPipeline >>> from diffusers.utils import make_image_grid >>> import os >>> def evaluate(config, epoch, pipeline): ... # Sample some images from random noise (this is the backward diffusion process). ... # The default pipeline output type is `List[PIL.Image]` ... images = pipeline( ... batch_size=config.eval_batch_size, ... generator=torch.Generator(device='cpu').manual_seed(config.seed), # Use a separate torch generator to avoid rewinding the random state of the main training loop ... ).images ... # Make a grid out of the images ... 
image_grid = make_image_grid(images, rows=4, cols=4) ... # Save the images ... test_dir = os.path.join(config.output_dir, "samples") ... os.makedirs(test_dir, exist_ok=True) ... image_grid.save(f"{test_dir}/{epoch:04d}.png") ``` Now you can wrap all these components together in a training loop with 🤗 Accelerate for easy TensorBoard logging, gradient accumulation, and mixed precision training. To upload the model to the Hub, write a function to get your repository name and information and then push it to the Hub. <Tip> 💡 The training loop below may look intimidating and long, but it'll be worth it later when you launch your training in just one line of code! If you can't wait and want to start generating images, feel free to copy and run the code below. You can always come back and examine the training loop more closely later, like when you're waiting for your model to finish training. 🤗 </Tip> ```py >>> from accelerate import Accelerator >>> from huggingface_hub import create_repo, upload_folder >>> from tqdm.auto import tqdm >>> from pathlib import Path >>> import os >>> def train_loop(config, model, noise_scheduler, optimizer, train_dataloader, lr_scheduler): ... # Initialize accelerator and tensorboard logging ... accelerator = Accelerator( ... mixed_precision=config.mixed_precision, ... gradient_accumulation_steps=config.gradient_accumulation_steps, ... log_with="tensorboard", ... project_dir=os.path.join(config.output_dir, "logs"), ... ) ... if accelerator.is_main_process: ... if config.output_dir is not None: ... os.makedirs(config.output_dir, exist_ok=True) ... if config.push_to_hub: ... repo_id = create_repo( ... repo_id=config.hub_model_id or Path(config.output_dir).name, exist_ok=True ... ).repo_id ... accelerator.init_trackers("train_example") ... # Prepare everything ... # There is no specific order to remember, you just need to unpack the ... # objects in the same order you gave them to the prepare method. ... model, optimizer, train_dataloader, lr_scheduler = accelerator.prepare( ... model, optimizer, train_dataloader, lr_scheduler ... ) ... global_step = 0 ... # Now you train the model ... for epoch in range(config.num_epochs): ... progress_bar = tqdm(total=len(train_dataloader), disable=not accelerator.is_local_main_process) ... progress_bar.set_description(f"Epoch {epoch}") ... for step, batch in enumerate(train_dataloader): ... clean_images = batch["images"] ... # Sample noise to add to the images ... noise = torch.randn(clean_images.shape, device=clean_images.device) ... bs = clean_images.shape[0] ... # Sample a random timestep for each image ... timesteps = torch.randint( ... 0, noise_scheduler.config.num_train_timesteps, (bs,), device=clean_images.device, ... dtype=torch.int64 ... ) ... # Add noise to the clean images according to the noise magnitude at each timestep ... # (this is the forward diffusion process) ... noisy_images = noise_scheduler.add_noise(clean_images, noise, timesteps) ... with accelerator.accumulate(model): ... # Predict the noise residual ... noise_pred = model(noisy_images, timesteps, return_dict=False)[0] ... loss = F.mse_loss(noise_pred, noise) ... accelerator.backward(loss) ... if accelerator.sync_gradients: ... accelerator.clip_grad_norm_(model.parameters(), 1.0) ... optimizer.step() ... lr_scheduler.step() ... optimizer.zero_grad() ... progress_bar.update(1) ... logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0], "step": global_step} ... progress_bar.set_postfix(**logs) ... accelerator.log(logs, step=global_step) ... 
global_step += 1 ... # After each epoch you optionally sample some demo images with evaluate() and save the model ... if accelerator.is_main_process: ... pipeline = DDPMPipeline(unet=accelerator.unwrap_model(model), scheduler=noise_scheduler) ... if (epoch + 1) % config.save_image_epochs == 0 or epoch == config.num_epochs - 1: ... evaluate(config, epoch, pipeline) ... if (epoch + 1) % config.save_model_epochs == 0 or epoch == config.num_epochs - 1: ... if config.push_to_hub: ... upload_folder( ... repo_id=repo_id, ... folder_path=config.output_dir, ... commit_message=f"Epoch {epoch}", ... ignore_patterns=["step_*", "epoch_*"], ... ) ... else: ... pipeline.save_pretrained(config.output_dir) ``` Phew, that was quite a bit of code! But you're finally ready to launch the training with 🤗 Accelerate's [`~accelerate.notebook_launcher`] function. Pass the function the training loop, all the training arguments, and the number of processes (you can change this value to the number of GPUs available to you) to use for training: ```py >>> from accelerate import notebook_launcher >>> args = (config, model, noise_scheduler, optimizer, train_dataloader, lr_scheduler) >>> notebook_launcher(train_loop, args, num_processes=1) ``` Once training is complete, take a look at the final 🦋 images 🦋 generated by your diffusion model! ```py >>> import glob >>> sample_images = sorted(glob.glob(f"{config.output_dir}/samples/*.png")) >>> Image.open(sample_images[-1]) ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/butterflies_final.png"/> </div> ## Next steps Unconditional image generation is one example of a task that can be trained. You can explore other tasks and training techniques by visiting the [🧨 Diffusers Training Examples](../training/overview) page. Here are some examples of what you can learn: * [Textual Inversion](../training/text_inversion), an algorithm that teaches a model a specific visual concept and integrates it into the generated image. * [DreamBooth](../training/dreambooth), a technique for generating personalized images of a subject given several input images of the subject. * [Guide](../training/text2image) to finetuning a Stable Diffusion model on your own dataset. * [Guide](../training/lora) to using LoRA, a memory-efficient technique for finetuning really large models faster.
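If you'd like to sample from your trained model again later, you can reload the saved pipeline. Here is a minimal sketch, assuming the checkpoint was written to `config.output_dir` by `save_pretrained` (swap in your Hub repo id if you pushed the model instead):

```py
>>> import torch
>>> from diffusers import DDPMPipeline

>>> # reload the pipeline saved at the end of training
>>> pipeline = DDPMPipeline.from_pretrained(config.output_dir).to("cuda")

>>> # generate a fresh batch of images from pure noise
>>> images = pipeline(
...     batch_size=4,
...     generator=torch.Generator(device="cpu").manual_seed(0),
... ).images
>>> images[0]
```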
diffusers/docs/source/en/tutorials/basic_training.md/0
{ "file_path": "diffusers/docs/source/en/tutorials/basic_training.md", "repo_id": "diffusers", "token_count": 6242 }
109
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Inpainting [[open-in-colab]] Inpainting replaces or edits specific areas of an image. This makes it a useful tool for image restoration like removing defects and artifacts, or even replacing an image area with something entirely new. Inpainting relies on a mask to determine which regions of an image to fill in; the area to inpaint is represented by white pixels and the area to keep is represented by black pixels. The white pixels are filled in by the prompt. With 🤗 Diffusers, here is how you can do inpainting: 1. Load an inpainting checkpoint with the [`AutoPipelineForInpainting`] class. This'll automatically detect the appropriate pipeline class to load based on the checkpoint: ```py import torch from diffusers import AutoPipelineForInpainting from diffusers.utils import load_image, make_image_grid pipeline = AutoPipelineForInpainting.from_pretrained( "kandinsky-community/kandinsky-2-2-decoder-inpaint", torch_dtype=torch.float16 ) pipeline.enable_model_cpu_offload() # remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed pipeline.enable_xformers_memory_efficient_attention() ``` <Tip> You'll notice throughout the guide, we use [`~DiffusionPipeline.enable_model_cpu_offload`] and [`~DiffusionPipeline.enable_xformers_memory_efficient_attention`], to save memory and increase inference speed. If you're using PyTorch 2.0, it's not necessary to call [`~DiffusionPipeline.enable_xformers_memory_efficient_attention`] on your pipeline because it'll already be using PyTorch 2.0's native [scaled-dot product attention](../optimization/torch2.0#scaled-dot-product-attention). </Tip> 2. Load the base and mask images: ```py init_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint.png") mask_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint_mask.png") ``` 3. 
Create a prompt to inpaint the image with and pass it to the pipeline with the base and mask images: ```py prompt = "a black cat with glowing eyes, cute, adorable, disney, pixar, highly detailed, 8k" negative_prompt = "bad anatomy, deformed, ugly, disfigured" image = pipeline(prompt=prompt, negative_prompt=negative_prompt, image=init_image, mask_image=mask_image).images[0] make_image_grid([init_image, mask_image, image], rows=1, cols=3) ``` <div class="flex gap-4"> <div> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">base image</figcaption> </div> <div> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint_mask.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">mask image</figcaption> </div> <div> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint-cat.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">generated image</figcaption> </div> </div> ## Create a mask image Throughout this guide, the mask image is provided in all of the code examples for convenience. You can inpaint on your own images, but you'll need to create a mask image for it. Use the Space below to easily create a mask image. Upload a base image to inpaint on and use the sketch tool to draw a mask. Once you're done, click **Run** to generate and download the mask image. <iframe src="https://stevhliu-inpaint-mask-maker.hf.space" frameborder="0" width="850" height="450" ></iframe> ### Mask blur The [`~VaeImageProcessor.blur`] method provides an option for how to blend the original image and inpaint area. The amount of blur is determined by the `blur_factor` parameter. Increasing the `blur_factor` increases the amount of blur applied to the mask edges, softening the transition between the original image and inpaint area. A low or zero `blur_factor` preserves the sharper edges of the mask. To use this, create a blurred mask with the image processor. ```py import torch from diffusers import AutoPipelineForInpainting from diffusers.utils import load_image from PIL import Image pipeline = AutoPipelineForInpainting.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16).to('cuda') mask = load_image("https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/seashore_mask.png") blurred_mask = pipeline.mask_processor.blur(mask, blur_factor=33) blurred_mask ``` <div class="flex gap-4"> <div> <img class="rounded-xl" src="https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/seashore_mask.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">mask with no blur</figcaption> </div> <div> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/mask_blurred.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">mask with blur applied</figcaption> </div> </div> ## Popular models [Stable Diffusion Inpainting](https://huggingface.co/runwayml/stable-diffusion-inpainting), [Stable Diffusion XL (SDXL) Inpainting](https://huggingface.co/diffusers/stable-diffusion-xl-1.0-inpainting-0.1), and [Kandinsky 2.2 Inpainting](https://huggingface.co/kandinsky-community/kandinsky-2-2-decoder-inpaint) are among the most popular models for inpainting. 
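You can then pass the blurred mask to the pipeline in place of a regular mask. As a minimal sketch - reusing the pipeline and imports from the snippet above, and assuming the matching base image from the same testing dataset - it might look like this:

```py
base = load_image("https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/seashore.png")

# the blurred mask softens the transition between the inpainted area and the original image
image = pipeline("boat", image=base, mask_image=blurred_mask).images[0]
image
```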
SDXL typically produces higher resolution images than Stable Diffusion v1.5, and Kandinsky 2.2 is also capable of generating high-quality images. ### Stable Diffusion Inpainting Stable Diffusion Inpainting is a latent diffusion model finetuned on 512x512 images on inpainting. It is a good starting point because it is relatively fast and generates good quality images. To use this model for inpainting, you'll need to pass a prompt, base and mask image to the pipeline: ```py import torch from diffusers import AutoPipelineForInpainting from diffusers.utils import load_image, make_image_grid pipeline = AutoPipelineForInpainting.from_pretrained( "runwayml/stable-diffusion-inpainting", torch_dtype=torch.float16, variant="fp16" ) pipeline.enable_model_cpu_offload() # remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed pipeline.enable_xformers_memory_efficient_attention() # load base and mask image init_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint.png") mask_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint_mask.png") generator = torch.Generator("cuda").manual_seed(92) prompt = "concept art digital painting of an elven castle, inspired by lord of the rings, highly detailed, 8k" image = pipeline(prompt=prompt, image=init_image, mask_image=mask_image, generator=generator).images[0] make_image_grid([init_image, mask_image, image], rows=1, cols=3) ``` ### Stable Diffusion XL (SDXL) Inpainting SDXL is a larger and more powerful version of Stable Diffusion v1.5. This model can follow a two-stage model process (though each model can also be used alone); the base model generates an image, and a refiner model takes that image and further enhances its details and quality. Take a look at the [SDXL](sdxl) guide for a more comprehensive guide on how to use SDXL and configure it's parameters. ```py import torch from diffusers import AutoPipelineForInpainting from diffusers.utils import load_image, make_image_grid pipeline = AutoPipelineForInpainting.from_pretrained( "diffusers/stable-diffusion-xl-1.0-inpainting-0.1", torch_dtype=torch.float16, variant="fp16" ) pipeline.enable_model_cpu_offload() # remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed pipeline.enable_xformers_memory_efficient_attention() # load base and mask image init_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint.png") mask_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint_mask.png") generator = torch.Generator("cuda").manual_seed(92) prompt = "concept art digital painting of an elven castle, inspired by lord of the rings, highly detailed, 8k" image = pipeline(prompt=prompt, image=init_image, mask_image=mask_image, generator=generator).images[0] make_image_grid([init_image, mask_image, image], rows=1, cols=3) ``` ### Kandinsky 2.2 Inpainting The Kandinsky model family is similar to SDXL because it uses two models as well; the image prior model creates image embeddings, and the diffusion model generates images from them. You can load the image prior and diffusion model separately, but the easiest way to use Kandinsky 2.2 is to load it into the [`AutoPipelineForInpainting`] class which uses the [`KandinskyV22InpaintCombinedPipeline`] under the hood. 
```py import torch from diffusers import AutoPipelineForInpainting from diffusers.utils import load_image, make_image_grid pipeline = AutoPipelineForInpainting.from_pretrained( "kandinsky-community/kandinsky-2-2-decoder-inpaint", torch_dtype=torch.float16 ) pipeline.enable_model_cpu_offload() # remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed pipeline.enable_xformers_memory_efficient_attention() # load base and mask image init_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint.png") mask_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint_mask.png") generator = torch.Generator("cuda").manual_seed(92) prompt = "concept art digital painting of an elven castle, inspired by lord of the rings, highly detailed, 8k" image = pipeline(prompt=prompt, image=init_image, mask_image=mask_image, generator=generator).images[0] make_image_grid([init_image, mask_image, image], rows=1, cols=3) ``` <div class="flex flex-row gap-4"> <div class="flex-1"> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">base image</figcaption> </div> <div class="flex-1"> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint-sdv1.5.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">Stable Diffusion Inpainting</figcaption> </div> <div class="flex-1"> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint-sdxl.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">Stable Diffusion XL Inpainting</figcaption> </div> <div class="flex-1"> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint-kandinsky.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">Kandinsky 2.2 Inpainting</figcaption> </div> </div> ## Non-inpaint specific checkpoints So far, this guide has used inpaint specific checkpoints such as [runwayml/stable-diffusion-inpainting](https://huggingface.co/runwayml/stable-diffusion-inpainting). But you can also use regular checkpoints like [runwayml/stable-diffusion-v1-5](https://huggingface.co/runwayml/stable-diffusion-v1-5). Let's compare the results of the two checkpoints. The image on the left is generated from a regular checkpoint, and the image on the right is from an inpaint checkpoint. You'll immediately notice the image on the left is not as clean, and you can still see the outline of the area the model is supposed to inpaint. The image on the right is much cleaner and the inpainted area appears more natural. 
<hfoptions id="regular-specific"> <hfoption id="runwayml/stable-diffusion-v1-5"> ```py import torch from diffusers import AutoPipelineForInpainting from diffusers.utils import load_image, make_image_grid pipeline = AutoPipelineForInpainting.from_pretrained( "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, variant="fp16" ) pipeline.enable_model_cpu_offload() # remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed pipeline.enable_xformers_memory_efficient_attention() # load base and mask image init_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint.png") mask_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint_mask.png") generator = torch.Generator("cuda").manual_seed(92) prompt = "concept art digital painting of an elven castle, inspired by lord of the rings, highly detailed, 8k" image = pipeline(prompt=prompt, image=init_image, mask_image=mask_image, generator=generator).images[0] make_image_grid([init_image, image], rows=1, cols=2) ``` </hfoption> <hfoption id="runwayml/stable-diffusion-inpainting"> ```py import torch from diffusers import AutoPipelineForInpainting from diffusers.utils import load_image, make_image_grid pipeline = AutoPipelineForInpainting.from_pretrained( "runwayml/stable-diffusion-inpainting", torch_dtype=torch.float16, variant="fp16" ) pipeline.enable_model_cpu_offload() # remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed pipeline.enable_xformers_memory_efficient_attention() # load base and mask image init_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint.png") mask_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint_mask.png") generator = torch.Generator("cuda").manual_seed(92) prompt = "concept art digital painting of an elven castle, inspired by lord of the rings, highly detailed, 8k" image = pipeline(prompt=prompt, image=init_image, mask_image=mask_image, generator=generator).images[0] make_image_grid([init_image, image], rows=1, cols=2) ``` </hfoption> </hfoptions> <div class="flex gap-4"> <div> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/non-inpaint-specific.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">runwayml/stable-diffusion-v1-5</figcaption> </div> <div> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint-specific.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">runwayml/stable-diffusion-inpainting</figcaption> </div> </div> However, for more basic tasks like erasing an object from an image (like the rocks in the road for example), a regular checkpoint yields pretty good results. There isn't as noticeable of difference between the regular and inpaint checkpoint. 
<hfoptions id="inpaint"> <hfoption id="runwayml/stable-diffusion-v1-5"> ```py import torch from diffusers import AutoPipelineForInpainting from diffusers.utils import load_image, make_image_grid pipeline = AutoPipelineForInpainting.from_pretrained( "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, variant="fp16" ) pipeline.enable_model_cpu_offload() # remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed pipeline.enable_xformers_memory_efficient_attention() # load base and mask image init_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint.png") mask_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/road-mask.png") image = pipeline(prompt="road", image=init_image, mask_image=mask_image).images[0] make_image_grid([init_image, image], rows=1, cols=2) ``` </hfoption> <hfoption id="runwayml/stable-diffusion-inpaint"> ```py import torch from diffusers import AutoPipelineForInpainting from diffusers.utils import load_image, make_image_grid pipeline = AutoPipelineForInpainting.from_pretrained( "runwayml/stable-diffusion-inpainting", torch_dtype=torch.float16, variant="fp16" ) pipeline.enable_model_cpu_offload() # remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed pipeline.enable_xformers_memory_efficient_attention() # load base and mask image init_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint.png") mask_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/road-mask.png") image = pipeline(prompt="road", image=init_image, mask_image=mask_image).images[0] make_image_grid([init_image, image], rows=1, cols=2) ``` </hfoption> </hfoptions> <div class="flex gap-4"> <div> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/regular-inpaint-basic.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">runwayml/stable-diffusion-v1-5</figcaption> </div> <div> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/specific-inpaint-basic.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">runwayml/stable-diffusion-inpainting</figcaption> </div> </div> The trade-off of using a non-inpaint specific checkpoint is the overall image quality may be lower, but it generally tends to preserve the mask area (that is why you can see the mask outline). The inpaint specific checkpoints are intentionally trained to generate higher quality inpainted images, and that includes creating a more natural transition between the masked and unmasked areas. As a result, these checkpoints are more likely to change your unmasked area. If preserving the unmasked area is important for your task, you can use the [`VaeImageProcessor.apply_overlay`] method to force the unmasked area of an image to remain the same at the expense of some more unnatural transitions between the masked and unmasked areas. 
```py import PIL import numpy as np import torch from diffusers import AutoPipelineForInpainting from diffusers.utils import load_image, make_image_grid device = "cuda" pipeline = AutoPipelineForInpainting.from_pretrained( "runwayml/stable-diffusion-inpainting", torch_dtype=torch.float16, ) pipeline = pipeline.to(device) img_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png" mask_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png" init_image = load_image(img_url).resize((512, 512)) mask_image = load_image(mask_url).resize((512, 512)) prompt = "Face of a yellow cat, high resolution, sitting on a park bench" repainted_image = pipeline(prompt=prompt, image=init_image, mask_image=mask_image).images[0] repainted_image.save("repainted_image.png") unmasked_unchanged_image = pipeline.image_processor.apply_overlay(mask_image, init_image, repainted_image) unmasked_unchanged_image.save("force_unmasked_unchanged.png") make_image_grid([init_image, mask_image, repainted_image, unmasked_unchanged_image], rows=2, cols=2) ``` ## Configure pipeline parameters Image features - like quality and "creativity" - are dependent on pipeline parameters. Knowing what these parameters do is important for getting the results you want. Let's take a look at the most important parameters and see how changing them affects the output. ### Strength `strength` is a measure of how much noise is added to the base image, which influences how similar the output is to the base image. * 📈 a high `strength` value means more noise is added to an image and the denoising process takes longer, but you'll get higher quality images that are more different from the base image * 📉 a low `strength` value means less noise is added to an image and the denoising process is faster, but the image quality may not be as great and the generated image resembles the base image more ```py import torch from diffusers import AutoPipelineForInpainting from diffusers.utils import load_image, make_image_grid pipeline = AutoPipelineForInpainting.from_pretrained( "runwayml/stable-diffusion-inpainting", torch_dtype=torch.float16, variant="fp16" ) pipeline.enable_model_cpu_offload() # remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed pipeline.enable_xformers_memory_efficient_attention() # load base and mask image init_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint.png") mask_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint_mask.png") prompt = "concept art digital painting of an elven castle, inspired by lord of the rings, highly detailed, 8k" image = pipeline(prompt=prompt, image=init_image, mask_image=mask_image, strength=0.6).images[0] make_image_grid([init_image, mask_image, image], rows=1, cols=3) ``` <div class="flex flex-row gap-4"> <div class="flex-1"> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint-strength-0.6.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">strength = 0.6</figcaption> </div> <div class="flex-1"> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint-strength-0.8.png"/> <figcaption class="mt-2 text-center text-sm 
text-gray-500">strength = 0.8</figcaption> </div> <div class="flex-1"> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint-strength-1.0.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">strength = 1.0</figcaption> </div> </div> ### Guidance scale `guidance_scale` affects how aligned the text prompt and generated image are. * 📈 a high `guidance_scale` value means the prompt and generated image are closely aligned, so the output is a stricter interpretation of the prompt * 📉 a low `guidance_scale` value means the prompt and generated image are more loosely aligned, so the output may be more varied from the prompt You can use `strength` and `guidance_scale` together for more control over how expressive the model is. For example, a combination high `strength` and `guidance_scale` values gives the model the most creative freedom. ```py import torch from diffusers import AutoPipelineForInpainting from diffusers.utils import load_image, make_image_grid pipeline = AutoPipelineForInpainting.from_pretrained( "runwayml/stable-diffusion-inpainting", torch_dtype=torch.float16, variant="fp16" ) pipeline.enable_model_cpu_offload() # remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed pipeline.enable_xformers_memory_efficient_attention() # load base and mask image init_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint.png") mask_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint_mask.png") prompt = "concept art digital painting of an elven castle, inspired by lord of the rings, highly detailed, 8k" image = pipeline(prompt=prompt, image=init_image, mask_image=mask_image, guidance_scale=2.5).images[0] make_image_grid([init_image, mask_image, image], rows=1, cols=3) ``` <div class="flex flex-row gap-4"> <div class="flex-1"> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint-guidance-2.5.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">guidance_scale = 2.5</figcaption> </div> <div class="flex-1"> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint-guidance-7.5.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">guidance_scale = 7.5</figcaption> </div> <div class="flex-1"> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint-guidance-12.5.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">guidance_scale = 12.5</figcaption> </div> </div> ### Negative prompt A negative prompt assumes the opposite role of a prompt; it guides the model away from generating certain things in an image. This is useful for quickly improving image quality and preventing the model from generating things you don't want. 
```py import torch from diffusers import AutoPipelineForInpainting from diffusers.utils import load_image, make_image_grid pipeline = AutoPipelineForInpainting.from_pretrained( "runwayml/stable-diffusion-inpainting", torch_dtype=torch.float16, variant="fp16" ) pipeline.enable_model_cpu_offload() # remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed pipeline.enable_xformers_memory_efficient_attention() # load base and mask image init_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint.png") mask_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint_mask.png") prompt = "concept art digital painting of an elven castle, inspired by lord of the rings, highly detailed, 8k" negative_prompt = "bad architecture, unstable, poor details, blurry" image = pipeline(prompt=prompt, negative_prompt=negative_prompt, image=init_image, mask_image=mask_image).images[0] make_image_grid([init_image, mask_image, image], rows=1, cols=3) ``` <div class="flex justify-center"> <figure> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint-negative.png" /> <figcaption class="text-center">negative_prompt = "bad architecture, unstable, poor details, blurry"</figcaption> </figure> </div> ### Padding mask crop A method for increasing the inpainting image quality is to use the [`padding_mask_crop`](https://huggingface.co/docs/diffusers/v0.25.0/en/api/pipelines/stable_diffusion/inpaint#diffusers.StableDiffusionInpaintPipeline.__call__.padding_mask_crop) parameter. When enabled, this option crops the masked area with some user-specified padding and it'll also crop the same area from the original image. Both the image and mask are upscaled to a higher resolution for inpainting, and then overlaid on the original image. This is a quick and easy way to improve image quality without using a separate pipeline like [`StableDiffusionUpscalePipeline`]. Add the `padding_mask_crop` parameter to the pipeline call and set it to the desired padding value. ```py import torch from diffusers import AutoPipelineForInpainting from diffusers.utils import load_image from PIL import Image generator = torch.Generator(device='cuda').manual_seed(0) pipeline = AutoPipelineForInpainting.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16).to('cuda') base = load_image("https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/seashore.png") mask = load_image("https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/seashore_mask.png") image = pipeline("boat", image=base, mask_image=mask, strength=0.75, generator=generator, padding_mask_crop=32).images[0] image ``` <div class="flex gap-4"> <div> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/baseline_inpaint.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">default inpaint image</figcaption> </div> <div> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/padding_mask_crop_inpaint.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">inpaint image with `padding_mask_crop` enabled</figcaption> </div> </div> ## Chained inpainting pipelines [`AutoPipelineForInpainting`] can be chained with other 🤗 Diffusers pipelines to edit their outputs. 
This is often useful for improving the output quality from your other diffusion pipelines, and if you're using multiple pipelines, it can be more memory-efficient to chain them together to keep the outputs in latent space and reuse the same pipeline components. ### Text-to-image-to-inpaint Chaining a text-to-image and inpainting pipeline allows you to inpaint the generated image, and you don't have to provide a base image to begin with. This makes it convenient to edit your favorite text-to-image outputs without having to generate an entirely new image. Start with the text-to-image pipeline to create a castle: ```py import torch from diffusers import AutoPipelineForText2Image, AutoPipelineForInpainting from diffusers.utils import load_image, make_image_grid pipeline = AutoPipelineForText2Image.from_pretrained( "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, variant="fp16", use_safetensors=True ) pipeline.enable_model_cpu_offload() # remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed pipeline.enable_xformers_memory_efficient_attention() text2image = pipeline("concept art digital painting of an elven castle, inspired by lord of the rings, highly detailed, 8k").images[0] ``` Load the mask image of the output from above: ```py mask_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint_text-chain-mask.png") ``` And let's inpaint the masked area with a waterfall: ```py pipeline = AutoPipelineForInpainting.from_pretrained( "kandinsky-community/kandinsky-2-2-decoder-inpaint", torch_dtype=torch.float16 ) pipeline.enable_model_cpu_offload() # remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed pipeline.enable_xformers_memory_efficient_attention() prompt = "digital painting of a fantasy waterfall, cloudy" image = pipeline(prompt=prompt, image=text2image, mask_image=mask_image).images[0] make_image_grid([text2image, mask_image, image], rows=1, cols=3) ``` <div class="flex flex-row gap-4"> <div class="flex-1"> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint-text-chain.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">text-to-image</figcaption> </div> <div class="flex-1"> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint-text-chain-out.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">inpaint</figcaption> </div> </div> ### Inpaint-to-image-to-image You can also chain an inpainting pipeline before another pipeline like image-to-image or an upscaler to improve the quality. 
Begin by inpainting an image: ```py import torch from diffusers import AutoPipelineForInpainting, AutoPipelineForImage2Image from diffusers.utils import load_image, make_image_grid pipeline = AutoPipelineForInpainting.from_pretrained( "runwayml/stable-diffusion-inpainting", torch_dtype=torch.float16, variant="fp16" ) pipeline.enable_model_cpu_offload() # remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed pipeline.enable_xformers_memory_efficient_attention() # load base and mask image init_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint.png") mask_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint_mask.png") prompt = "concept art digital painting of an elven castle, inspired by lord of the rings, highly detailed, 8k" image_inpainting = pipeline(prompt=prompt, image=init_image, mask_image=mask_image).images[0] # resize image to 1024x1024 for SDXL image_inpainting = image_inpainting.resize((1024, 1024)) ``` Now let's pass the image to another inpainting pipeline with SDXL's refiner model to enhance the image details and quality: ```py pipeline = AutoPipelineForInpainting.from_pretrained( "stabilityai/stable-diffusion-xl-refiner-1.0", torch_dtype=torch.float16, variant="fp16" ) pipeline.enable_model_cpu_offload() # remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed pipeline.enable_xformers_memory_efficient_attention() image = pipeline(prompt=prompt, image=image_inpainting, mask_image=mask_image, output_type="latent").images[0] ``` <Tip> It is important to specify `output_type="latent"` in the pipeline to keep all the outputs in latent space to avoid an unnecessary decode-encode step. This only works if the chained pipelines are using the same VAE. For example, in the [Text-to-image-to-inpaint](#text-to-image-to-inpaint) section, Kandinsky 2.2 uses a different VAE class than the Stable Diffusion model so it won't work. But if you use Stable Diffusion v1.5 for both pipelines, then you can keep everything in latent space because they both use [`AutoencoderKL`]. </Tip> Finally, you can pass this image to an image-to-image pipeline to put the finishing touches on it. It is more efficient to use the [`~AutoPipelineForImage2Image.from_pipe`] method to reuse the existing pipeline components, and avoid unnecessarily loading all the pipeline components into memory again. 
```py pipeline = AutoPipelineForImage2Image.from_pipe(pipeline) # remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed pipeline.enable_xformers_memory_efficient_attention() image = pipeline(prompt=prompt, image=image).images[0] make_image_grid([init_image, mask_image, image_inpainting, image], rows=2, cols=2) ``` <div class="flex flex-row gap-4"> <div class="flex-1"> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">initial image</figcaption> </div> <div class="flex-1"> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint-to-image-chain.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">inpaint</figcaption> </div> <div class="flex-1"> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint-to-image-final.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">image-to-image</figcaption> </div> </div> Image-to-image and inpainting are actually very similar tasks. Image-to-image generates a new image that resembles the existing provided image. Inpainting does the same thing, but it only transforms the image area defined by the mask and the rest of the image is unchanged. You can think of inpainting as a more precise tool for making specific changes and image-to-image has a broader scope for making more sweeping changes. ## Control image generation Getting an image to look exactly the way you want is challenging because the denoising process is random. While you can control certain aspects of generation by configuring parameters like `negative_prompt`, there are better and more efficient methods for controlling image generation. ### Prompt weighting Prompt weighting provides a quantifiable way to scale the representation of concepts in a prompt. You can use it to increase or decrease the magnitude of the text embedding vector for each concept in the prompt, which subsequently determines how much of each concept is generated. The [Compel](https://github.com/damian0815/compel) library offers an intuitive syntax for scaling the prompt weights and generating the embeddings. Learn how to create the embeddings in the [Prompt weighting](../using-diffusers/weighted_prompts) guide. Once you've generated the embeddings, pass them to the `prompt_embeds` (and `negative_prompt_embeds` if you're using a negative prompt) parameter in the [`AutoPipelineForInpainting`]. 
The embeddings replace the `prompt` parameter: ```py import torch from diffusers import AutoPipelineForInpainting from diffusers.utils import make_image_grid pipeline = AutoPipelineForInpainting.from_pretrained( "runwayml/stable-diffusion-inpainting", torch_dtype=torch.float16, ) pipeline.enable_model_cpu_offload() # remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed pipeline.enable_xformers_memory_efficient_attention() image = pipeline(prompt_embeds=prompt_embeds, # generated from Compel negative_prompt_embeds=negative_prompt_embeds, # generated from Compel image=init_image, mask_image=mask_image ).images[0] make_image_grid([init_image, mask_image, image], rows=1, cols=3) ``` ### ControlNet ControlNet models are used with other diffusion models like Stable Diffusion, and they provide an even more flexible and accurate way to control how an image is generated. A ControlNet accepts an additional conditioning image input that guides the diffusion model to preserve the features in it. For example, let's condition an image with a ControlNet pretrained on inpaint images: ```py import torch import numpy as np from diffusers import ControlNetModel, StableDiffusionControlNetInpaintPipeline from diffusers.utils import load_image, make_image_grid # load ControlNet controlnet = ControlNetModel.from_pretrained("lllyasviel/control_v11p_sd15_inpaint", torch_dtype=torch.float16, variant="fp16") # pass ControlNet to the pipeline pipeline = StableDiffusionControlNetInpaintPipeline.from_pretrained( "runwayml/stable-diffusion-inpainting", controlnet=controlnet, torch_dtype=torch.float16, variant="fp16" ) pipeline.enable_model_cpu_offload() # remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed pipeline.enable_xformers_memory_efficient_attention() # load base and mask image init_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint.png") mask_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint_mask.png") # prepare control image def make_inpaint_condition(init_image, mask_image): init_image = np.array(init_image.convert("RGB")).astype(np.float32) / 255.0 mask_image = np.array(mask_image.convert("L")).astype(np.float32) / 255.0 assert init_image.shape[0:1] == mask_image.shape[0:1], "image and image_mask must have the same image size" init_image[mask_image > 0.5] = -1.0 # set as masked pixel init_image = np.expand_dims(init_image, 0).transpose(0, 3, 1, 2) init_image = torch.from_numpy(init_image) return init_image control_image = make_inpaint_condition(init_image, mask_image) ``` Now generate an image from the base, mask and control images. You'll notice features of the base image are strongly preserved in the generated image. 
```py prompt = "concept art digital painting of an elven castle, inspired by lord of the rings, highly detailed, 8k" image = pipeline(prompt=prompt, image=init_image, mask_image=mask_image, control_image=control_image).images[0] make_image_grid([init_image, mask_image, PIL.Image.fromarray(np.uint8(control_image[0][0])).convert('RGB'), image], rows=2, cols=2) ``` You can take this a step further and chain it with an image-to-image pipeline to apply a new [style](https://huggingface.co/nitrosocke/elden-ring-diffusion): ```py from diffusers import AutoPipelineForImage2Image pipeline = AutoPipelineForImage2Image.from_pretrained( "nitrosocke/elden-ring-diffusion", torch_dtype=torch.float16, ) pipeline.enable_model_cpu_offload() # remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed pipeline.enable_xformers_memory_efficient_attention() prompt = "elden ring style castle" # include the token "elden ring style" in the prompt negative_prompt = "bad architecture, deformed, disfigured, poor details" image_elden_ring = pipeline(prompt, negative_prompt=negative_prompt, image=image).images[0] make_image_grid([init_image, mask_image, image, image_elden_ring], rows=2, cols=2) ``` <div class="flex flex-row gap-4"> <div class="flex-1"> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">initial image</figcaption> </div> <div class="flex-1"> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint-controlnet.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">ControlNet inpaint</figcaption> </div> <div class="flex-1"> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint-img2img.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">image-to-image</figcaption> </div> </div> ## Optimize It can be difficult and slow to run diffusion models if you're resource constrained, but it doesn't have to be with a few optimization tricks. One of the biggest (and easiest) optimizations you can enable is switching to memory-efficient attention. If you're using PyTorch 2.0, [scaled-dot product attention](../optimization/torch2.0#scaled-dot-product-attention) is automatically enabled and you don't need to do anything else. For non-PyTorch 2.0 users, you can install and use [xFormers](../optimization/xformers)'s implementation of memory-efficient attention. Both options reduce memory usage and accelerate inference. You can also offload the model to the CPU to save even more memory: ```diff + pipeline.enable_xformers_memory_efficient_attention() + pipeline.enable_model_cpu_offload() ``` To speed-up your inference code even more, use [`torch_compile`](../optimization/torch2.0#torchcompile). You should wrap `torch.compile` around the most intensive component in the pipeline which is typically the UNet: ```py pipeline.unet = torch.compile(pipeline.unet, mode="reduce-overhead", fullgraph=True) ``` Learn more in the [Reduce memory usage](../optimization/memory) and [Torch 2.0](../optimization/torch2.0) guides.
diffusers/docs/source/en/using-diffusers/inpaint.md/0
{ "file_path": "diffusers/docs/source/en/using-diffusers/inpaint.md", "repo_id": "diffusers", "token_count": 14111 }
110
<!--Copyright 2024 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
-->

# Shap-E

[[open-in-colab]]

Shap-E is a conditional model for generating 3D assets which could be used for video game development, interior design, and architecture. It is trained on a large dataset of 3D assets, and post-processed to render more views of each object and produce 16K instead of 4K point clouds. The Shap-E model is trained in two steps:

1. an encoder accepts the point clouds and rendered views of a 3D asset and outputs the parameters of implicit functions that represent the asset
2. a diffusion model is trained on the latents produced by the encoder to generate either neural radiance fields (NeRFs) or a textured 3D mesh, making it easier to render and use the 3D asset in downstream applications

This guide will show you how to use Shap-E to start generating your own 3D assets!

Before you begin, make sure you have the following libraries installed:

```py
# uncomment to install the necessary libraries in Colab
#!pip install -q diffusers transformers accelerate trimesh
```

## Text-to-3D

To generate a gif of a 3D object, pass a text prompt to the [`ShapEPipeline`]. The pipeline generates a list of image frames which are used to create the 3D object.

```py
import torch
from diffusers import ShapEPipeline

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

pipe = ShapEPipeline.from_pretrained("openai/shap-e", torch_dtype=torch.float16, variant="fp16")
pipe = pipe.to(device)

guidance_scale = 15.0
prompt = ["A firecracker", "A birthday cupcake"]

images = pipe(
    prompt,
    guidance_scale=guidance_scale,
    num_inference_steps=64,
    frame_size=256,
).images
```

Now use the [`~utils.export_to_gif`] function to turn the list of image frames into a gif of the 3D object.

```py
from diffusers.utils import export_to_gif

export_to_gif(images[0], "firecracker_3d.gif")
export_to_gif(images[1], "cake_3d.gif")
```

<div class="flex gap-4">
  <div>
    <img class="rounded-xl" src="https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/shap_e/firecracker_out.gif"/>
    <figcaption class="mt-2 text-center text-sm text-gray-500">prompt = "A firecracker"</figcaption>
  </div>
  <div>
    <img class="rounded-xl" src="https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/shap_e/cake_out.gif"/>
    <figcaption class="mt-2 text-center text-sm text-gray-500">prompt = "A birthday cupcake"</figcaption>
  </div>
</div>

## Image-to-3D

To generate a 3D object from another image, use the [`ShapEImg2ImgPipeline`]. You can use an existing image or generate an entirely new one. Let's use the [Kandinsky 2.1](../api/pipelines/kandinsky) model to generate a new image.
```py from diffusers import DiffusionPipeline import torch prior_pipeline = DiffusionPipeline.from_pretrained("kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16, use_safetensors=True).to("cuda") pipeline = DiffusionPipeline.from_pretrained("kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16, use_safetensors=True).to("cuda") prompt = "A cheeseburger, white background" image_embeds, negative_image_embeds = prior_pipeline(prompt, guidance_scale=1.0).to_tuple() image = pipeline( prompt, image_embeds=image_embeds, negative_image_embeds=negative_image_embeds, ).images[0] image.save("burger.png") ``` Pass the cheeseburger to the [`ShapEImg2ImgPipeline`] to generate a 3D representation of it. ```py from PIL import Image from diffusers import ShapEImg2ImgPipeline from diffusers.utils import export_to_gif pipe = ShapEImg2ImgPipeline.from_pretrained("openai/shap-e-img2img", torch_dtype=torch.float16, variant="fp16").to("cuda") guidance_scale = 3.0 image = Image.open("burger.png").resize((256, 256)) images = pipe( image, guidance_scale=guidance_scale, num_inference_steps=64, frame_size=256, ).images gif_path = export_to_gif(images[0], "burger_3d.gif") ``` <div class="flex gap-4"> <div> <img class="rounded-xl" src="https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/shap_e/burger_in.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">cheeseburger</figcaption> </div> <div> <img class="rounded-xl" src="https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/shap_e/burger_out.gif"/> <figcaption class="mt-2 text-center text-sm text-gray-500">3D cheeseburger</figcaption> </div> </div> ## Generate mesh Shap-E is a flexible model that can also generate textured mesh outputs to be rendered for downstream applications. In this example, you'll convert the output into a `glb` file because the 🤗 Datasets library supports mesh visualization of `glb` files which can be rendered by the [Dataset viewer](https://huggingface.co/docs/hub/datasets-viewer#dataset-preview). You can generate mesh outputs for both the [`ShapEPipeline`] and [`ShapEImg2ImgPipeline`] by specifying the `output_type` parameter as `"mesh"`: ```py import torch from diffusers import ShapEPipeline device = torch.device("cuda" if torch.cuda.is_available() else "cpu") pipe = ShapEPipeline.from_pretrained("openai/shap-e", torch_dtype=torch.float16, variant="fp16") pipe = pipe.to(device) guidance_scale = 15.0 prompt = "A birthday cupcake" images = pipe(prompt, guidance_scale=guidance_scale, num_inference_steps=64, frame_size=256, output_type="mesh").images ``` Use the [`~utils.export_to_ply`] function to save the mesh output as a `ply` file: <Tip> You can optionally save the mesh output as an `obj` file with the [`~utils.export_to_obj`] function. The ability to save the mesh output in a variety of formats makes it more flexible for downstream usage! 
</Tip> ```py from diffusers.utils import export_to_ply ply_path = export_to_ply(images[0], "3d_cake.ply") print(f"Saved to folder: {ply_path}") ``` Then you can convert the `ply` file to a `glb` file with the trimesh library: ```py import trimesh mesh = trimesh.load("3d_cake.ply") mesh_export = mesh.export("3d_cake.glb", file_type="glb") ``` By default, the mesh output is focused from the bottom viewpoint but you can change the default viewpoint by applying a rotation transform: ```py import trimesh import numpy as np mesh = trimesh.load("3d_cake.ply") rot = trimesh.transformations.rotation_matrix(-np.pi / 2, [1, 0, 0]) mesh = mesh.apply_transform(rot) mesh_export = mesh.export("3d_cake.glb", file_type="glb") ``` Upload the mesh file to your dataset repository to visualize it with the Dataset viewer! <div class="flex justify-center"> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/3D-cake.gif"/> </div>
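One way to do that upload is with the `huggingface_hub` client. A minimal sketch, assuming you're already logged in and have created a dataset repo (the repo id below is a placeholder - replace it with your own):

```py
from huggingface_hub import upload_file

upload_file(
    path_or_fileobj="3d_cake.glb",
    path_in_repo="3d_cake.glb",
    repo_id="your-username/3d-assets",  # placeholder repo id, replace with your own dataset repo
    repo_type="dataset",
)
```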
diffusers/docs/source/en/using-diffusers/shap-e.md/0
{ "file_path": "diffusers/docs/source/en/using-diffusers/shap-e.md", "repo_id": "diffusers", "token_count": 2540 }
111
- sections: - local: index title: 🧨 Diffusers - local: quicktour title: "훑어보기" - local: stable_diffusion title: Stable Diffusion - local: installation title: 설치 title: 시작하기 - sections: - local: tutorials/tutorial_overview title: 개요 - local: using-diffusers/write_own_pipeline title: 모델과 스케줄러 이해하기 - local: in_translation # tutorials/autopipeline title: (번역중) AutoPipeline - local: tutorials/basic_training title: Diffusion 모델 학습하기 - local: in_translation # tutorials/using_peft_for_inference title: (번역중) 추론을 위한 LoRAs 불러오기 - local: in_translation # tutorials/fast_diffusion title: (번역중) Text-to-image diffusion 모델 추론 가속화하기 - local: in_translation # tutorials/inference_with_big_models title: (번역중) 큰 모델로 작업하기 title: 튜토리얼 - sections: - local: using-diffusers/loading title: 파이프라인 불러오기 - local: using-diffusers/custom_pipeline_overview title: 커뮤니티 파이프라인과 컴포넌트 불러오기 - local: using-diffusers/schedulers title: 스케줄러와 모델 불러오기 - local: using-diffusers/other-formats title: 모델 파일과 레이아웃 - local: using-diffusers/loading_adapters title: 어댑터 불러오기 - local: using-diffusers/push_to_hub title: 파일들을 Hub로 푸시하기 title: 파이프라인과 어댑터 불러오기 - sections: - local: using-diffusers/unconditional_image_generation title: Unconditional 이미지 생성 - local: using-diffusers/conditional_image_generation title: Text-to-image - local: using-diffusers/img2img title: Image-to-image - local: using-diffusers/inpaint title: 인페인팅 - local: in_translation # using-diffusers/text-img2vid title: (번역중) Text 또는 image-to-video - local: using-diffusers/depth2img title: Depth-to-image title: 생성 태스크 - sections: - local: in_translation # using-diffusers/overview_techniques title: (번역중) 개요 - local: training/distributed_inference title: 여러 GPU를 사용한 분산 추론 - local: in_translation # using-diffusers/merge_loras title: (번역중) LoRA 병합 - local: in_translation # using-diffusers/scheduler_features title: (번역중) 스케줄러 기능 - local: in_translation # using-diffusers/callback title: (번역중) 파이프라인 콜백 - local: in_translation # using-diffusers/reusing_seeds title: (번역중) 재현 가능한 파이프라인 - local: in_translation # using-diffusers/image_quality title: (번역중) 이미지 퀄리티 조절하기 - local: using-diffusers/weighted_prompts title: 프롬프트 기술 title: 추론 테크닉 - sections: - local: in_translation # advanced_inference/outpaint title: (번역중) Outpainting title: 추론 심화 - sections: - local: in_translation # using-diffusers/sdxl title: (번역중) Stable Diffusion XL - local: using-diffusers/sdxl_turbo title: SDXL Turbo - local: using-diffusers/kandinsky title: Kandinsky - local: in_translation # using-diffusers/ip_adapter title: (번역중) IP-Adapter - local: in_translation # using-diffusers/pag title: (번역중) PAG - local: in_translation # using-diffusers/controlnet title: (번역중) ControlNet - local: in_translation # using-diffusers/t2i_adapter title: (번역중) T2I-Adapter - local: in_translation # using-diffusers/inference_with_lcm title: (번역중) Latent Consistency Model - local: using-diffusers/textual_inversion_inference title: Textual inversion - local: using-diffusers/shap-e title: Shap-E - local: using-diffusers/diffedit title: DiffEdit - local: in_translation # using-diffusers/inference_with_tcd_lora title: (번역중) Trajectory Consistency Distillation-LoRA - local: using-diffusers/svd title: Stable Video Diffusion - local: in_translation # using-diffusers/marigold_usage title: (번역중) Marigold 컴퓨터 비전 title: 특정 파이프라인 예시 - sections: - local: training/overview title: 개요 - local: training/create_dataset title: 학습을 위한 데이터셋 생성하기 - local: training/adapt_a_model title: 새로운 태스크에 모델 적용하기 - isExpanded: false sections: - local: 
training/unconditional_training title: Unconditional 이미지 생성 - local: training/text2image title: Text-to-image - local: in_translation # training/sdxl title: (번역중) Stable Diffusion XL - local: in_translation # training/kandinsky title: (번역중) Kandinsky 2.2 - local: in_translation # training/wuerstchen title: (번역중) Wuerstchen - local: training/controlnet title: ControlNet - local: in_translation # training/t2i_adapters title: (번역중) T2I-Adapters - local: training/instructpix2pix title: InstructPix2Pix title: 모델 - isExpanded: false sections: - local: training/text_inversion title: Textual Inversion - local: training/dreambooth title: DreamBooth - local: training/lora title: LoRA - local: training/custom_diffusion title: Custom Diffusion - local: in_translation # training/lcm_distill title: (번역중) Latent Consistency Distillation - local: in_translation # training/ddpo title: (번역중) DDPO 강화학습 훈련 title: 메서드 title: 학습 - sections: - local: optimization/fp16 title: 추론 스피드업 - local: in_translation # optimization/memory title: (번역중) 메모리 사용량 줄이기 - local: optimization/torch2.0 title: PyTorch 2.0 - local: optimization/xformers title: xFormers - local: optimization/tome title: Token merging - local: in_translation # optimization/deepcache title: (번역중) DeepCache - local: in_translation # optimization/tgate title: (번역중) TGATE - sections: - local: using-diffusers/stable_diffusion_jax_how_to title: JAX/Flax - local: optimization/onnx title: ONNX - local: optimization/open_vino title: OpenVINO - local: optimization/coreml title: Core ML title: 최적화된 모델 형식 - sections: - local: optimization/mps title: Metal Performance Shaders (MPS) - local: optimization/habana title: Habana Gaudi title: 최적화된 하드웨어 title: 추론 가속화와 메모리 줄이기 - sections: - local: conceptual/philosophy title: 철학 - local: using-diffusers/controlling_generation title: 제어된 생성 - local: conceptual/contribution title: 어떻게 기여하나요? - local: conceptual/ethical_guidelines title: Diffusers의 윤리적 가이드라인 - local: conceptual/evaluation title: Diffusion Models 평가하기 title: 개념 가이드 - sections: - sections: - sections: - local: api/pipelines/stable_diffusion/stable_diffusion_xl title: Stable Diffusion XL title: Stable Diffusion title: Pipelines title: API
diffusers/docs/source/ko/_toctree.yml/0
{ "file_path": "diffusers/docs/source/ko/_toctree.yml", "repo_id": "diffusers", "token_count": 3468 }
112
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Diffusers에서의 PyTorch 2.0 가속화 지원 `0.13.0` 버전부터 Diffusers는 [PyTorch 2.0](https://pytorch.org/get-started/pytorch-2.0/)에서의 최신 최적화를 지원합니다. 이는 다음을 포함합니다. 1. memory-efficient attention을 사용한 가속화된 트랜스포머 지원 - `xformers`같은 추가적인 dependencies 필요 없음 2. 추가 성능 향상을 위한 개별 모델에 대한 컴파일 기능 [torch.compile](https://pytorch.org/tutorials/intermediate/torch_compile_tutorial.html) 지원 ## 설치 가속화된 어텐션 구현과 `torch.compile()`을 사용하기 위해, pip로 최신 버전의 PyTorch 2.0이 설치되어 있고 diffusers가 0.13.0 버전 이상인지 확인하세요. 아래 설명된 바와 같이, PyTorch 2.0이 활성화되어 있을 때 diffusers는 최적화된 어텐션 프로세서([`AttnProcessor2_0`](https://github.com/huggingface/diffusers/blob/1a5797c6d4491a879ea5285c4efc377664e0332d/src/diffusers/models/attention_processor.py#L798))를 사용합니다. ```bash pip install --upgrade torch diffusers ``` ## 가속화된 트랜스포머와 `torch.compile` 사용하기 1. **가속화된 트랜스포머 구현** PyTorch 2.0에는 [`torch.nn.functional.scaled_dot_product_attention`](https://pytorch.org/docs/master/generated/torch.nn.functional.scaled_dot_product_attention) 함수를 통해 최적화된 memory-efficient attention의 구현이 포함되어 있습니다. 이는 입력 및 GPU 유형에 따라 여러 최적화를 자동으로 활성화합니다. 이는 [xFormers](https://github.com/facebookresearch/xformers)의 `memory_efficient_attention`과 유사하지만 기본적으로 PyTorch에 내장되어 있습니다. 이러한 최적화는 PyTorch 2.0이 설치되어 있고 `torch.nn.functional.scaled_dot_product_attention`을 사용할 수 있는 경우 Diffusers에서 기본적으로 활성화됩니다. 이를 사용하려면 `torch 2.0`을 설치하고 파이프라인을 사용하기만 하면 됩니다. 예를 들어: ```Python import torch from diffusers import DiffusionPipeline pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16) pipe = pipe.to("cuda") prompt = "a photo of an astronaut riding a horse on mars" image = pipe(prompt).images[0] ``` 이를 명시적으로 활성화하려면(필수는 아님) 아래와 같이 수행할 수 있습니다. ```diff import torch from diffusers import DiffusionPipeline + from diffusers.models.attention_processor import AttnProcessor2_0 pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16).to("cuda") + pipe.unet.set_attn_processor(AttnProcessor2_0()) prompt = "a photo of an astronaut riding a horse on mars" image = pipe(prompt).images[0] ``` 이 실행 과정은 `xFormers`만큼 빠르고 메모리 측면에서도 효율적이어야 합니다. 자세한 내용은 [벤치마크](#benchmark)에서 확인하세요. 파이프라인을 보다 deterministic하게 만들거나 파인 튜닝된 모델을 [Core ML](https://huggingface.co/docs/diffusers/v0.16.0/en/optimization/coreml#how-to-run-stable-diffusion-with-core-ml)과 같은 다른 형식으로 변환해야 하는 경우 바닐라 어텐션 프로세서 ([`AttnProcessor`](https://github.com/huggingface/diffusers/blob/1a5797c6d4491a879ea5285c4efc377664e0332d/src/diffusers/models/attention_processor.py#L402))로 되돌릴 수 있습니다.
일반 어텐션 프로세서를 사용하려면 [`~diffusers.UNet2DConditionModel.set_default_attn_processor`] 함수를 사용할 수 있습니다: ```Python import torch from diffusers import DiffusionPipeline from diffusers.models.attention_processor import AttnProcessor pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16).to("cuda") pipe.unet.set_default_attn_processor() prompt = "a photo of an astronaut riding a horse on mars" image = pipe(prompt).images[0] ``` 2. **torch.compile** 추가적인 속도 향상을 위해 새로운 `torch.compile` 기능을 사용할 수 있습니다. 파이프라인의 UNet은 일반적으로 계산 비용이 가장 크기 때문에 나머지 하위 모델(텍스트 인코더와 VAE)은 그대로 두고 `unet`을 `torch.compile`로 래핑합니다. 자세한 내용과 다른 옵션은 [torch 컴파일 문서](https://pytorch.org/tutorials/intermediate/torch_compile_tutorial.html)를 참조하세요. ```python pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True) images = pipe(prompt, num_inference_steps=steps, num_images_per_prompt=batch_size).images ``` GPU 유형에 따라 `compile()`은 가속화된 트랜스포머 최적화를 통해 **5% - 300%**의 _추가 성능 향상_을 얻을 수 있습니다. 그러나 컴파일은 Ampere(A100, 3090), Ada(4090) 및 Hopper(H100)와 같은 최신 GPU 아키텍처에서 더 많은 성능 향상을 가져올 수 있음을 참고하세요. 컴파일은 완료하는 데 약간의 시간이 걸리므로, 파이프라인을 한 번 준비한 다음 동일한 유형의 추론 작업을 여러 번 수행해야 하는 상황에 가장 적합합니다. 다른 이미지 크기에서 컴파일된 파이프라인을 호출하면 시간적 비용이 많이 들 수 있는 컴파일 작업이 다시 트리거됩니다. ## 벤치마크 PyTorch 2.0의 효율적인 어텐션 구현과 `torch.compile`을 사용하여 가장 많이 사용되는 5개의 파이프라인에 대해 다양한 GPU와 배치 크기에 걸쳐 포괄적인 벤치마크를 수행했습니다. 여기서는 [`torch.compile()`이 최적으로 활용되도록 하는](https://github.com/huggingface/diffusers/pull/3313) `diffusers 0.17.0.dev0`을 사용했습니다. ### 벤치마킹 코드 #### Stable Diffusion text-to-image ```python from diffusers import DiffusionPipeline import torch path = "runwayml/stable-diffusion-v1-5" run_compile = True # Set True / False pipe = DiffusionPipeline.from_pretrained(path, torch_dtype=torch.float16) pipe = pipe.to("cuda") pipe.unet.to(memory_format=torch.channels_last) if run_compile: print("Run torch compile") pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True) prompt = "ghibli style, a fantasy landscape with castles" for _ in range(3): images = pipe(prompt=prompt).images ``` #### Stable Diffusion image-to-image ```python from diffusers import StableDiffusionImg2ImgPipeline import requests import torch from PIL import Image from io import BytesIO url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg" response = requests.get(url) init_image = Image.open(BytesIO(response.content)).convert("RGB") init_image = init_image.resize((512, 512)) path = "runwayml/stable-diffusion-v1-5" run_compile = True # Set True / False pipe = StableDiffusionImg2ImgPipeline.from_pretrained(path, torch_dtype=torch.float16) pipe = pipe.to("cuda") pipe.unet.to(memory_format=torch.channels_last) if run_compile: print("Run torch compile") pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True) prompt = "ghibli style, a fantasy landscape with castles" for _ in range(3): image = pipe(prompt=prompt, image=init_image).images[0] ``` #### Stable Diffusion - inpainting ```python from diffusers import StableDiffusionInpaintPipeline import requests import torch from PIL import Image from io import BytesIO url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg" def download_image(url): response = requests.get(url) return Image.open(BytesIO(response.content)).convert("RGB") img_url = 
"https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png" mask_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png" init_image = download_image(img_url).resize((512, 512)) mask_image = download_image(mask_url).resize((512, 512)) path = "runwayml/stable-diffusion-inpainting" run_compile = True # Set True / False pipe = StableDiffusionInpaintPipeline.from_pretrained(path, torch_dtype=torch.float16) pipe = pipe.to("cuda") pipe.unet.to(memory_format=torch.channels_last) if run_compile: print("Run torch compile") pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True) prompt = "ghibli style, a fantasy landscape with castles" for _ in range(3): image = pipe(prompt=prompt, image=init_image, mask_image=mask_image).images[0] ``` #### ControlNet ```python from diffusers import StableDiffusionControlNetPipeline, ControlNetModel import requests import torch from PIL import Image from io import BytesIO url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg" response = requests.get(url) init_image = Image.open(BytesIO(response.content)).convert("RGB") init_image = init_image.resize((512, 512)) path = "runwayml/stable-diffusion-v1-5" run_compile = True # Set True / False controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16) pipe = StableDiffusionControlNetPipeline.from_pretrained( path, controlnet=controlnet, torch_dtype=torch.float16 ) pipe = pipe.to("cuda") pipe.unet.to(memory_format=torch.channels_last) pipe.controlnet.to(memory_format=torch.channels_last) if run_compile: print("Run torch compile") pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True) pipe.controlnet = torch.compile(pipe.controlnet, mode="reduce-overhead", fullgraph=True) prompt = "ghibli style, a fantasy landscape with castles" for _ in range(3): image = pipe(prompt=prompt, image=init_image).images[0] ``` #### IF text-to-image + upscaling ```python from diffusers import DiffusionPipeline import torch run_compile = True # Set True / False pipe = DiffusionPipeline.from_pretrained("DeepFloyd/IF-I-M-v1.0", variant="fp16", text_encoder=None, torch_dtype=torch.float16) pipe.to("cuda") pipe_2 = DiffusionPipeline.from_pretrained("DeepFloyd/IF-II-M-v1.0", variant="fp16", text_encoder=None, torch_dtype=torch.float16) pipe_2.to("cuda") pipe_3 = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-x4-upscaler", torch_dtype=torch.float16) pipe_3.to("cuda") pipe.unet.to(memory_format=torch.channels_last) pipe_2.unet.to(memory_format=torch.channels_last) pipe_3.unet.to(memory_format=torch.channels_last) if run_compile: pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True) pipe_2.unet = torch.compile(pipe_2.unet, mode="reduce-overhead", fullgraph=True) pipe_3.unet = torch.compile(pipe_3.unet, mode="reduce-overhead", fullgraph=True) prompt = "the blue hulk" prompt_embeds = torch.randn((1, 2, 4096), dtype=torch.float16) neg_prompt_embeds = torch.randn((1, 2, 4096), dtype=torch.float16) for _ in range(3): image = pipe(prompt_embeds=prompt_embeds, negative_prompt_embeds=neg_prompt_embeds, output_type="pt").images image_2 = pipe_2(image=image, prompt_embeds=prompt_embeds, negative_prompt_embeds=neg_prompt_embeds, output_type="pt").images image_3 = pipe_3(prompt=prompt, image=image, noise_level=100).images 
``` PyTorch 2.0 및 `torch.compile()`로 얻을 수 있는 가능한 속도 향상에 대해, [Stable Diffusion text-to-image pipeline](StableDiffusionPipeline)에 대한 상대적인 속도 향상을 보여주는 차트를 5개의 서로 다른 GPU 제품군(배치 크기 4)에 대해 나타냅니다: ![t2i_speedup](https://huggingface.co/datasets/diffusers/docs-images/resolve/main/pt2_benchmarks/t2i_speedup.png) 이 속도 향상이 위에 제시된 다른 파이프라인에 대해서도 어떻게 유지되는지 더 잘 이해하기 위해, 세 가지의 다른 배치 크기에 걸쳐 A100의 벤치마킹(PyTorch 2.0 nightly 및 `torch.compile()` 사용) 수치를 보여주는 차트를 보입니다: ![a100_numbers](https://huggingface.co/datasets/diffusers/docs-images/resolve/main/pt2_benchmarks/a100_numbers.png) _(위 차트의 벤치마크 메트릭은 **초당 iteration 수(iterations/second)**입니다)_ 그러나 투명성을 위해 모든 벤치마킹 수치를 공개합니다! 다음 표들에서는, **_초당 처리되는 iteration_** 수 측면에서의 결과를 보여줍니다. ### A100 (batch size: 1) | **Pipeline** | **torch 2.0 - <br>no compile** | **torch nightly - <br>no compile** | **torch 2.0 - <br>compile** | **torch nightly - <br>compile** | |:---:|:---:|:---:|:---:|:---:| | SD - txt2img | 21.66 | 23.13 | 44.03 | 49.74 | | SD - img2img | 21.81 | 22.40 | 43.92 | 46.32 | | SD - inpaint | 22.24 | 23.23 | 43.76 | 49.25 | | SD - controlnet | 15.02 | 15.82 | 32.13 | 36.08 | | IF | 20.21 / <br>13.84 / <br>24.00 | 20.12 / <br>13.70 / <br>24.03 | ❌ | 97.34 / <br>27.23 / <br>111.66 | ### A100 (batch size: 4) | **Pipeline** | **torch 2.0 - <br>no compile** | **torch nightly - <br>no compile** | **torch 2.0 - <br>compile** | **torch nightly - <br>compile** | |:---:|:---:|:---:|:---:|:---:| | SD - txt2img | 11.6 | 13.12 | 14.62 | 17.27 | | SD - img2img | 11.47 | 13.06 | 14.66 | 17.25 | | SD - inpaint | 11.67 | 13.31 | 14.88 | 17.48 | | SD - controlnet | 8.28 | 9.38 | 10.51 | 12.41 | | IF | 25.02 | 18.04 | ❌ | 48.47 | ### A100 (batch size: 16) | **Pipeline** | **torch 2.0 - <br>no compile** | **torch nightly - <br>no compile** | **torch 2.0 - <br>compile** | **torch nightly - <br>compile** | |:---:|:---:|:---:|:---:|:---:| | SD - txt2img | 3.04 | 3.6 | 3.83 | 4.68 | | SD - img2img | 2.98 | 3.58 | 3.83 | 4.67 | | SD - inpaint | 3.04 | 3.66 | 3.9 | 4.76 | | SD - controlnet | 2.15 | 2.58 | 2.74 | 3.35 | | IF | 8.78 | 9.82 | ❌ | 16.77 | ### V100 (batch size: 1) | **Pipeline** | **torch 2.0 - <br>no compile** | **torch nightly - <br>no compile** | **torch 2.0 - <br>compile** | **torch nightly - <br>compile** | |:---:|:---:|:---:|:---:|:---:| | SD - txt2img | 18.99 | 19.14 | 20.95 | 22.17 | | SD - img2img | 18.56 | 19.18 | 20.95 | 22.11 | | SD - inpaint | 19.14 | 19.06 | 21.08 | 22.20 | | SD - controlnet | 13.48 | 13.93 | 15.18 | 15.88 | | IF | 20.01 / <br>9.08 / <br>23.34 | 19.79 / <br>8.98 / <br>24.10 | ❌ | 55.75 / <br>11.57 / <br>57.67 | ### V100 (batch size: 4) | **Pipeline** | **torch 2.0 - <br>no compile** | **torch nightly - <br>no compile** | **torch 2.0 - <br>compile** | **torch nightly - <br>compile** | |:---:|:---:|:---:|:---:|:---:| | SD - txt2img | 5.96 | 5.89 | 6.83 | 6.86 | | SD - img2img | 5.90 | 5.91 | 6.81 | 6.82 | | SD - inpaint | 5.99 | 6.03 | 6.93 | 6.95 | | SD - controlnet | 4.26 | 4.29 | 4.92 | 4.93 | | IF | 15.41 | 14.76 | ❌ | 22.95 | ### V100 (batch size: 16) | **Pipeline** | **torch 2.0 - <br>no compile** | **torch nightly - <br>no compile** | **torch 2.0 - <br>compile** | **torch nightly - <br>compile** | |:---:|:---:|:---:|:---:|:---:| | SD - txt2img | 1.66 | 1.66 | 1.92 | 1.90 | | SD - 
img2img | 1.65 | 1.65 | 1.91 | 1.89 | | SD - inpaint | 1.69 | 1.69 | 1.95 | 1.93 | | SD - controlnet | 1.19 | 1.19 | OOM after warmup | 1.36 | | IF | 5.43 | 5.29 | ❌ | 7.06 | ### T4 (batch size: 1) | **Pipeline** | **torch 2.0 - <br>no compile** | **torch nightly - <br>no compile** | **torch 2.0 - <br>compile** | **torch nightly - <br>compile** | |:---:|:---:|:---:|:---:|:---:| | SD - txt2img | 6.9 | 6.95 | 7.3 | 7.56 | | SD - img2img | 6.84 | 6.99 | 7.04 | 7.55 | | SD - inpaint | 6.91 | 6.7 | 7.01 | 7.37 | | SD - controlnet | 4.89 | 4.86 | 5.35 | 5.48 | | IF | 17.42 / <br>2.47 / <br>18.52 | 16.96 / <br>2.45 / <br>18.69 | ❌ | 24.63 / <br>2.47 / <br>23.39 | ### T4 (batch size: 4) | **Pipeline** | **torch 2.0 - <br>no compile** | **torch nightly - <br>no compile** | **torch 2.0 - <br>compile** | **torch nightly - <br>compile** | |:---:|:---:|:---:|:---:|:---:| | SD - txt2img | 1.79 | 1.79 | 2.03 | 1.99 | | SD - img2img | 1.77 | 1.77 | 2.05 | 2.04 | | SD - inpaint | 1.81 | 1.82 | 2.09 | 2.09 | | SD - controlnet | 1.34 | 1.27 | 1.47 | 1.46 | | IF | 5.79 | 5.61 | ❌ | 7.39 | ### T4 (batch size: 16) | **Pipeline** | **torch 2.0 - <br>no compile** | **torch nightly - <br>no compile** | **torch 2.0 - <br>compile** | **torch nightly - <br>compile** | |:---:|:---:|:---:|:---:|:---:| | SD - txt2img | 2.34s | 2.30s | OOM after 2nd iteration | 1.99s | | SD - img2img | 2.35s | 2.31s | OOM after warmup | 2.00s | | SD - inpaint | 2.30s | 2.26s | OOM after 2nd iteration | 1.95s | | SD - controlnet | OOM after 2nd iteration | OOM after 2nd iteration | OOM after warmup | OOM after warmup | | IF * | 1.44 | 1.44 | ❌ | 1.94 | ### RTX 3090 (batch size: 1) | **Pipeline** | **torch 2.0 - <br>no compile** | **torch nightly - <br>no compile** | **torch 2.0 - <br>compile** | **torch nightly - <br>compile** | |:---:|:---:|:---:|:---:|:---:| | SD - txt2img | 22.56 | 22.84 | 23.84 | 25.69 | | SD - img2img | 22.25 | 22.61 | 24.1 | 25.83 | | SD - inpaint | 22.22 | 22.54 | 24.26 | 26.02 | | SD - controlnet | 16.03 | 16.33 | 17.38 | 18.56 | | IF | 27.08 / <br>9.07 / <br>31.23 | 26.75 / <br>8.92 / <br>31.47 | ❌ | 68.08 / <br>11.16 / <br>65.29 | ### RTX 3090 (batch size: 4) | **Pipeline** | **torch 2.0 - <br>no compile** | **torch nightly - <br>no compile** | **torch 2.0 - <br>compile** | **torch nightly - <br>compile** | |:---:|:---:|:---:|:---:|:---:| | SD - txt2img | 6.46 | 6.35 | 7.29 | 7.3 | | SD - img2img | 6.33 | 6.27 | 7.31 | 7.26 | | SD - inpaint | 6.47 | 6.4 | 7.44 | 7.39 | | SD - controlnet | 4.59 | 4.54 | 5.27 | 5.26 | | IF | 16.81 | 16.62 | ❌ | 21.57 | ### RTX 3090 (batch size: 16) | **Pipeline** | **torch 2.0 - <br>no compile** | **torch nightly - <br>no compile** | **torch 2.0 - <br>compile** | **torch nightly - <br>compile** | |:---:|:---:|:---:|:---:|:---:| | SD - txt2img | 1.7 | 1.69 | 1.93 | 1.91 | | SD - img2img | 1.68 | 1.67 | 1.93 | 1.9 | | SD - inpaint | 1.72 | 1.71 | 1.97 | 1.94 | | SD - controlnet | 1.23 | 1.22 | 1.4 | 1.38 | | IF | 5.01 | 5.00 | ❌ | 6.33 | ### RTX 4090 (batch size: 1) | **Pipeline** | **torch 2.0 - <br>no compile** | **torch nightly - <br>no compile** | **torch 2.0 - <br>compile** | **torch nightly - <br>compile** | |:---:|:---:|:---:|:---:|:---:| | SD - txt2img | 40.5 | 41.89 | 44.65 | 49.81 | | SD - img2img | 40.39 | 41.95 | 44.46 | 49.8 | | SD - inpaint | 40.51 | 41.88 | 44.58 | 49.72 | | SD - controlnet | 29.27 | 30.29 | 32.26 | 36.03 | | IF | 69.71 / <br>18.78 / <br>85.49 | 69.13 / <br>18.80 / <br>85.56 | ❌ | 124.60 / <br>26.37 / <br>138.79 | ### RTX 4090 (batch size: 4) | 
**Pipeline** | **torch 2.0 - <br>no compile** | **torch nightly - <br>no compile** | **torch 2.0 - <br>compile** | **torch nightly - <br>compile** | |:---:|:---:|:---:|:---:|:---:| | SD - txt2img | 12.62 | 12.84 | 15.32 | 15.59 | | SD - img2img | 12.61 | 12.79 | 15.35 | 15.66 | | SD - inpaint | 12.65 | 12.81 | 15.3 | 15.58 | | SD - controlnet | 9.1 | 9.25 | 11.03 | 11.22 | | IF | 31.88 | 31.14 | ❌ | 43.92 | ### RTX 4090 (batch size: 16) | **Pipeline** | **torch 2.0 - <br>no compile** | **torch nightly - <br>no compile** | **torch 2.0 - <br>compile** | **torch nightly - <br>compile** | |:---:|:---:|:---:|:---:|:---:| | SD - txt2img | 3.17 | 3.2 | 3.84 | 3.85 | | SD - img2img | 3.16 | 3.2 | 3.84 | 3.85 | | SD - inpaint | 3.17 | 3.2 | 3.85 | 3.85 | | SD - controlnet | 2.23 | 2.3 | 2.7 | 2.75 | | IF | 9.26 | 9.2 | ❌ | 13.31 | ## 참고 * 벤치마크 수행에 사용된 환경에 대한 자세한 내용은 [이 PR](https://github.com/huggingface/diffusers/pull/3313)을 참조하세요. * IF 파이프라인과 배치 크기 > 1의 경우 첫 번째 IF 파이프라인에서 text-to-image 생성을 위한 배치 크기 > 1만 사용했으며 업스케일링에는 사용하지 않았습니다. 즉, 두 개의 업스케일링 파이프라인이 배치 크기 1임을 의미합니다. *Diffusers에서 `torch.compile()` 지원을 개선하는 데 도움을 준 PyTorch 팀의 [Horace He](https://github.com/Chillee)에게 감사드립니다.*
diffusers/docs/source/ko/optimization/torch2.0.md/0
{ "file_path": "diffusers/docs/source/ko/optimization/torch2.0.md", "repo_id": "diffusers", "token_count": 10770 }
113
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> [[open-in-colab]] # Diffusion 모델을 학습하기 Unconditional 이미지 생성은 학습에 사용된 데이터셋과 유사한 이미지를 생성하는 diffusion 모델에서 인기 있는 어플리케이션입니다. 일반적으로, 가장 좋은 결과는 특정 데이터셋에 사전 훈련된 모델을 파인튜닝하는 것으로 얻을 수 있습니다. 이 [허브](https://huggingface.co/search/full-text?q=unconditional-image-generation&type=model)에서 이러한 많은 체크포인트를 찾을 수 있지만, 만약 마음에 드는 체크포인트를 찾지 못했다면, 언제든지 스스로 학습할 수 있습니다! 이 튜토리얼은 나만의 🦋 나비 🦋를 생성하기 위해 [Smithsonian Butterflies](https://huggingface.co/datasets/huggan/smithsonian_butterflies_subset) 데이터셋의 하위 집합에서 [`UNet2DModel`] 모델을 학습하는 방법을 가르쳐줄 것입니다. <Tip> 💡 이 학습 튜토리얼은 [Training with 🧨 Diffusers](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/training_example.ipynb) 노트북 기반으로 합니다. Diffusion 모델의 작동 방식 및 자세한 내용은 노트북을 확인하세요! </Tip> 시작 전에, 🤗 Datasets을 불러오고 전처리하기 위해 데이터셋이 설치되어 있는지 다수 GPU에서 학습을 간소화하기 위해 🤗 Accelerate 가 설치되어 있는지 확인하세요. 그 후 학습 메트릭을 시각화하기 위해 [TensorBoard](https://www.tensorflow.org/tensorboard)를 또한 설치하세요. (또한 학습 추적을 위해 [Weights & Biases](https://docs.wandb.ai/)를 사용할 수 있습니다.) ```bash !pip install diffusers[training] ``` 커뮤니티에 모델을 공유할 것을 권장하며, 이를 위해서 Hugging Face 계정에 로그인을 해야 합니다. (계정이 없다면 [여기](https://hf.co/join)에서 만들 수 있습니다.) 노트북에서 로그인할 수 있으며 메시지가 표시되면 토큰을 입력할 수 있습니다. ```py >>> from huggingface_hub import notebook_login >>> notebook_login() ``` 또는 터미널로 로그인할 수 있습니다: ```bash huggingface-cli login ``` 모델 체크포인트가 상당히 크기 때문에 [Git-LFS](https://git-lfs.com/)에서 대용량 파일의 버전 관리를 할 수 있습니다. ```bash !sudo apt -qq install git-lfs !git config --global credential.helper store ``` ## 학습 구성 편의를 위해 학습 파라미터들을 포함한 `TrainingConfig` 클래스를 생성합니다 (자유롭게 조정 가능): ```py >>> from dataclasses import dataclass >>> @dataclass ... class TrainingConfig: ... image_size = 128 # 생성되는 이미지 해상도 ... train_batch_size = 16 ... eval_batch_size = 16 # 평가 동안에 샘플링할 이미지 수 ... num_epochs = 50 ... gradient_accumulation_steps = 1 ... learning_rate = 1e-4 ... lr_warmup_steps = 500 ... save_image_epochs = 10 ... save_model_epochs = 30 ... mixed_precision = "fp16" # `no`는 float32, 자동 혼합 정밀도를 위한 `fp16` ... output_dir = "ddpm-butterflies-128" # 로컬 및 HF Hub에 저장되는 모델명 ... push_to_hub = True # 저장된 모델을 HF Hub에 업로드할지 여부 ... hub_private_repo = False ... overwrite_output_dir = True # 노트북을 다시 실행할 때 이전 모델에 덮어씌울지 ... seed = 0 >>> config = TrainingConfig() ``` ## 데이터셋 불러오기 🤗 Datasets 라이브러리와 [Smithsonian Butterflies](https://huggingface.co/datasets/huggan/smithsonian_butterflies_subset) 데이터셋을 쉽게 불러올 수 있습니다. ```py >>> from datasets import load_dataset >>> config.dataset_name = "huggan/smithsonian_butterflies_subset" >>> dataset = load_dataset(config.dataset_name, split="train") ``` 💡[HugGan Community Event](https://huggingface.co/huggan) 에서 추가의 데이터셋을 찾거나 로컬의 [`ImageFolder`](https://huggingface.co/docs/datasets/image_dataset#imagefolder)를 만듦으로써 나만의 데이터셋을 사용할 수 있습니다. HugGan Community Event 에 가져온 데이터셋의 경우 리포지토리의 id로 `config.dataset_name` 을 설정하고, 나만의 이미지를 사용하는 경우 `imagefolder` 를 설정합니다. 
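예를 들어 로컬 이미지 폴더를 사용한다면, 대략 아래와 같이 불러올 수 있습니다 (아래 폴더 경로는 이 가이드에 없는 가상의 예시이므로 실제 이미지 폴더 경로로 바꿔야 합니다):

```py
>>> # 로컬 폴더의 이미지들로 나만의 데이터셋 만들기 (경로는 예시입니다)
>>> from datasets import load_dataset

>>> dataset = load_dataset("imagefolder", data_dir="path/to/my/butterflies", split="train")
```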
🤗 Datasets은 [`~datasets.Image`] 기능을 사용해 자동으로 이미지 데이터를 디코딩하고 [`PIL.Image`](https://pillow.readthedocs.io/en/stable/reference/Image.html)로 불러옵니다. 이를 시각화 해보면: ```py >>> import matplotlib.pyplot as plt >>> fig, axs = plt.subplots(1, 4, figsize=(16, 4)) >>> for i, image in enumerate(dataset[:4]["image"]): ... axs[i].imshow(image) ... axs[i].set_axis_off() >>> fig.show() ``` ![](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/butterflies_ds.png) 이미지는 모두 다른 사이즈이기 때문에, 우선 전처리가 필요합니다: - `Resize` 는 `config.image_size` 에 정의된 이미지 사이즈로 변경합니다. - `RandomHorizontalFlip` 은 랜덤적으로 이미지를 미러링하여 데이터셋을 보강합니다. - `Normalize` 는 모델이 예상하는 [-1, 1] 범위로 픽셀 값을 재조정 하는데 중요합니다. ```py >>> from torchvision import transforms >>> preprocess = transforms.Compose( ... [ ... transforms.Resize((config.image_size, config.image_size)), ... transforms.RandomHorizontalFlip(), ... transforms.ToTensor(), ... transforms.Normalize([0.5], [0.5]), ... ] ... ) ``` 학습 도중에 `preprocess` 함수를 적용하려면 🤗 Datasets의 [`~datasets.Dataset.set_transform`] 방법이 사용됩니다. ```py >>> def transform(examples): ... images = [preprocess(image.convert("RGB")) for image in examples["image"]] ... return {"images": images} >>> dataset.set_transform(transform) ``` 이미지의 크기가 조정되었는지 확인하기 위해 이미지를 다시 시각화해보세요. 이제 [DataLoader](https://pytorch.org/docs/stable/data#torch.utils.data.DataLoader)에 데이터셋을 포함해 학습할 준비가 되었습니다! ```py >>> import torch >>> train_dataloader = torch.utils.data.DataLoader(dataset, batch_size=config.train_batch_size, shuffle=True) ``` ## UNet2DModel 생성하기 🧨 Diffusers에 사전학습된 모델들은 모델 클래스에서 원하는 파라미터로 쉽게 생성할 수 있습니다. 예를 들어, [`UNet2DModel`]를 생성하려면: ```py >>> from diffusers import UNet2DModel >>> model = UNet2DModel( ... sample_size=config.image_size, # 타겟 이미지 해상도 ... in_channels=3, # 입력 채널 수, RGB 이미지에서 3 ... out_channels=3, # 출력 채널 수 ... layers_per_block=2, # UNet 블럭당 몇 개의 ResNet 레이어가 사용되는지 ... block_out_channels=(128, 128, 256, 256, 512, 512), # 각 UNet 블럭을 위한 출력 채널 수 ... down_block_types=( ... "DownBlock2D", # 일반적인 ResNet 다운샘플링 블럭 ... "DownBlock2D", ... "DownBlock2D", ... "DownBlock2D", ... "AttnDownBlock2D", # spatial self-attention이 포함된 일반적인 ResNet 다운샘플링 블럭 ... "DownBlock2D", ... ), ... up_block_types=( ... "UpBlock2D", # 일반적인 ResNet 업샘플링 블럭 ... "AttnUpBlock2D", # spatial self-attention이 포함된 일반적인 ResNet 업샘플링 블럭 ... "UpBlock2D", ... "UpBlock2D", ... "UpBlock2D", ... "UpBlock2D", ... ), ... ) ``` 샘플의 이미지 크기와 모델 출력 크기가 맞는지 빠르게 확인하기 위한 좋은 아이디어가 있습니다: ```py >>> sample_image = dataset[0]["images"].unsqueeze(0) >>> print("Input shape:", sample_image.shape) Input shape: torch.Size([1, 3, 128, 128]) >>> print("Output shape:", model(sample_image, timestep=0).sample.shape) Output shape: torch.Size([1, 3, 128, 128]) ``` 훌륭해요! 다음, 이미지에 약간의 노이즈를 더하기 위해 스케줄러가 필요합니다. ## 스케줄러 생성하기 스케줄러는 모델을 학습 또는 추론에 사용하는지에 따라 다르게 작동합니다. 추론시에, 스케줄러는 노이즈로부터 이미지를 생성합니다. 학습시 스케줄러는 diffusion 과정에서의 특정 포인트로부터 모델의 출력 또는 샘플을 가져와 *노이즈 스케줄* 과 *업데이트 규칙*에 따라 이미지에 노이즈를 적용합니다. 
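참고로 DDPM의 경우, 아래에서 사용할 `add_noise` 메서드가 수행하는 forward diffusion 과정은 잘 알려진 형태로 대략 다음과 같이 쓸 수 있습니다 (여기서 x_0는 원본 이미지, ε은 가우시안 노이즈, ᾱ_t는 노이즈 스케줄이 정하는 누적 계수입니다):

$$
x_t = \sqrt{\bar{\alpha}_t}\, x_0 + \sqrt{1 - \bar{\alpha}_t}\, \epsilon
$$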
`DDPMScheduler`를 보면 이전으로부터 `sample_image`에 랜덤한 노이즈를 더하는 `add_noise` 메서드를 사용합니다: ```py >>> import torch >>> from PIL import Image >>> from diffusers import DDPMScheduler >>> noise_scheduler = DDPMScheduler(num_train_timesteps=1000) >>> noise = torch.randn(sample_image.shape) >>> timesteps = torch.LongTensor([50]) >>> noisy_image = noise_scheduler.add_noise(sample_image, noise, timesteps) >>> Image.fromarray(((noisy_image.permute(0, 2, 3, 1) + 1.0) * 127.5).type(torch.uint8).numpy()[0]) ``` ![](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/noisy_butterfly.png) 모델의 학습 목적은 이미지에 더해진 노이즈를 예측하는 것입니다. 이 단계에서 손실은 다음과 같이 계산될 수 있습니다: ```py >>> import torch.nn.functional as F >>> noise_pred = model(noisy_image, timesteps).sample >>> loss = F.mse_loss(noise_pred, noise) ``` ## 모델 학습하기 지금까지, 모델 학습을 시작하기 위해 많은 부분을 갖추었으며 이제 남은 것은 모든 것을 조합하는 것입니다. 우선 옵티마이저(optimizer)와 학습률 스케줄러(learning rate scheduler)가 필요할 것입니다: ```py >>> from diffusers.optimization import get_cosine_schedule_with_warmup >>> optimizer = torch.optim.AdamW(model.parameters(), lr=config.learning_rate) >>> lr_scheduler = get_cosine_schedule_with_warmup( ... optimizer=optimizer, ... num_warmup_steps=config.lr_warmup_steps, ... num_training_steps=(len(train_dataloader) * config.num_epochs), ... ) ``` 그 후, 모델을 평가하는 방법이 필요합니다. 평가를 위해, `DDPMPipeline`을 사용해 배치의 이미지 샘플들을 생성하고 그리드 형태로 저장할 수 있습니다: ```py >>> from diffusers import DDPMPipeline >>> import math >>> import os >>> def make_grid(images, rows, cols): ... w, h = images[0].size ... grid = Image.new("RGB", size=(cols * w, rows * h)) ... for i, image in enumerate(images): ... grid.paste(image, box=(i % cols * w, i // cols * h)) ... return grid >>> def evaluate(config, epoch, pipeline): ... # 랜덤한 노이즈로 부터 이미지를 추출합니다.(이는 역전파 diffusion 과정입니다.) ... # 기본 파이프라인 출력 형태는 `List[PIL.Image]` 입니다. ... images = pipeline( ... batch_size=config.eval_batch_size, ... generator=torch.manual_seed(config.seed), ... ).images ... # 이미지들을 그리드로 만들어줍니다. ... image_grid = make_grid(images, rows=4, cols=4) ... # 이미지들을 저장합니다. ... test_dir = os.path.join(config.output_dir, "samples") ... os.makedirs(test_dir, exist_ok=True) ... image_grid.save(f"{test_dir}/{epoch:04d}.png") ``` TensorBoard에 로깅, 그래디언트 누적 및 혼합 정밀도 학습을 쉽게 수행하기 위해 🤗 Accelerate를 학습 루프에 함께 앞서 말한 모든 구성 정보들을 묶어 진행할 수 있습니다. 허브에 모델을 업로드 하기 위해 리포지토리 이름 및 정보를 가져오기 위한 함수를 작성하고 허브에 업로드할 수 있습니다. 💡아래의 학습 루프는 어렵고 길어 보일 수 있지만, 나중에 한 줄의 코드로 학습을 한다면 그만한 가치가 있을 것입니다! 만약 기다리지 못하고 이미지를 생성하고 싶다면, 아래 코드를 자유롭게 붙여넣고 작동시키면 됩니다. 🤗 ```py >>> from accelerate import Accelerator >>> from huggingface_hub import create_repo, upload_folder >>> from tqdm.auto import tqdm >>> from pathlib import Path >>> import os >>> def train_loop(config, model, noise_scheduler, optimizer, train_dataloader, lr_scheduler): ... # Initialize accelerator and tensorboard logging ... accelerator = Accelerator( ... mixed_precision=config.mixed_precision, ... gradient_accumulation_steps=config.gradient_accumulation_steps, ... log_with="tensorboard", ... project_dir=os.path.join(config.output_dir, "logs"), ... ) ... if accelerator.is_main_process: ... if config.output_dir is not None: ... os.makedirs(config.output_dir, exist_ok=True) ... if config.push_to_hub: ... repo_id = create_repo( ... repo_id=config.hub_model_id or Path(config.output_dir).name, exist_ok=True ... ).repo_id ... accelerator.init_trackers("train_example") ... # 모든 것이 준비되었습니다. ... # 기억해야 할 특정한 순서는 없으며 준비한 방법에 제공한 것과 동일한 순서로 객체의 압축을 풀면 됩니다. ... 
model, optimizer, train_dataloader, lr_scheduler = accelerator.prepare( ... model, optimizer, train_dataloader, lr_scheduler ... ) ... global_step = 0 ... # 이제 모델을 학습합니다. ... for epoch in range(config.num_epochs): ... progress_bar = tqdm(total=len(train_dataloader), disable=not accelerator.is_local_main_process) ... progress_bar.set_description(f"Epoch {epoch}") ... for step, batch in enumerate(train_dataloader): ... clean_images = batch["images"] ... # 이미지에 더할 노이즈를 샘플링합니다. ... noise = torch.randn(clean_images.shape, device=clean_images.device) ... bs = clean_images.shape[0] ... # 각 이미지를 위한 랜덤한 타임스텝(timestep)을 샘플링합니다. ... timesteps = torch.randint( ... 0, noise_scheduler.config.num_train_timesteps, (bs,), device=clean_images.device, ... dtype=torch.int64 ... ) ... # 각 타임스텝의 노이즈 크기에 따라 깨끗한 이미지에 노이즈를 추가합니다. ... # (이는 foward diffusion 과정입니다.) ... noisy_images = noise_scheduler.add_noise(clean_images, noise, timesteps) ... with accelerator.accumulate(model): ... # 노이즈를 반복적으로 예측합니다. ... noise_pred = model(noisy_images, timesteps, return_dict=False)[0] ... loss = F.mse_loss(noise_pred, noise) ... accelerator.backward(loss) ... accelerator.clip_grad_norm_(model.parameters(), 1.0) ... optimizer.step() ... lr_scheduler.step() ... optimizer.zero_grad() ... progress_bar.update(1) ... logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0], "step": global_step} ... progress_bar.set_postfix(**logs) ... accelerator.log(logs, step=global_step) ... global_step += 1 ... # 각 에포크가 끝난 후 evaluate()와 몇 가지 데모 이미지를 선택적으로 샘플링하고 모델을 저장합니다. ... if accelerator.is_main_process: ... pipeline = DDPMPipeline(unet=accelerator.unwrap_model(model), scheduler=noise_scheduler) ... if (epoch + 1) % config.save_image_epochs == 0 or epoch == config.num_epochs - 1: ... evaluate(config, epoch, pipeline) ... if (epoch + 1) % config.save_model_epochs == 0 or epoch == config.num_epochs - 1: ... if config.push_to_hub: ... upload_folder( ... repo_id=repo_id, ... folder_path=config.output_dir, ... commit_message=f"Epoch {epoch}", ... ignore_patterns=["step_*", "epoch_*"], ... ) ... else: ... pipeline.save_pretrained(config.output_dir) ``` 휴, 코드가 꽤 많았네요! 하지만 🤗 Accelerate의 [`~accelerate.notebook_launcher`] 함수와 학습을 시작할 준비가 되었습니다. 함수에 학습 루프, 모든 학습 인수, 학습에 사용할 프로세스 수(사용 가능한 GPU의 수를 변경할 수 있음)를 전달합니다: ```py >>> from accelerate import notebook_launcher >>> args = (config, model, noise_scheduler, optimizer, train_dataloader, lr_scheduler) >>> notebook_launcher(train_loop, args, num_processes=1) ``` 한번 학습이 완료되면, diffusion 모델로 생성된 최종 🦋이미지🦋를 확인해보길 바랍니다! ```py >>> import glob >>> sample_images = sorted(glob.glob(f"{config.output_dir}/samples/*.png")) >>> Image.open(sample_images[-1]) ``` ![](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/butterflies_final.png) ## 다음 단계 Unconditional 이미지 생성은 학습될 수 있는 작업 중 하나의 예시입니다. 다른 작업과 학습 방법은 [🧨 Diffusers 학습 예시](../training/overview) 페이지에서 확인할 수 있습니다. 다음은 학습할 수 있는 몇 가지 예시입니다: - [Textual Inversion](../training/text_inversion), 특정 시각적 개념을 학습시켜 생성된 이미지에 통합시키는 알고리즘입니다. - [DreamBooth](../training/dreambooth), 주제에 대한 몇 가지 입력 이미지들이 주어지면 주제에 대한 개인화된 이미지를 생성하기 위한 기술입니다. - [Guide](../training/text2image) 데이터셋에 Stable Diffusion 모델을 파인튜닝하는 방법입니다. - [Guide](../training/lora) LoRA를 사용해 매우 큰 모델을 빠르게 파인튜닝하기 위한 메모리 효율적인 기술입니다.
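끝으로, 학습이 끝난 뒤 저장된 파이프라인은 아래와 같이 다시 불러와 추론에 사용할 수 있습니다 (경로는 위 설정의 `output_dir`을 가정한 예시이며, Hub에 푸시했다면 해당 repo id를 대신 사용할 수 있습니다):

```py
>>> from diffusers import DDPMPipeline

>>> # output_dir에 저장된 파이프라인을 다시 불러와 새 이미지를 생성합니다 (경로는 예시)
>>> pipeline = DDPMPipeline.from_pretrained("ddpm-butterflies-128").to("cuda")
>>> image = pipeline(batch_size=1).images[0]
>>> image.save("new_butterfly.png")
```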
diffusers/docs/source/ko/tutorials/basic_training.md/0
{ "file_path": "diffusers/docs/source/ko/tutorials/basic_training.md", "repo_id": "diffusers", "token_count": 11285 }
114
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Shap-E [[open-in-colab]] Shap-E는 비디오 게임 개발, 인테리어 디자인, 건축에 사용할 수 있는 3D 에셋을 생성하기 위한 conditional 모델입니다. 대규모 3D 에셋 데이터셋을 학습되었고, 각 오브젝트의 더 많은 뷰를 렌더링하고 4K point cloud 대신 16K를 생성하도록 후처리합니다. Shap-E 모델은 두 단계로 학습됩니다: 1. 인코더가 3D 에셋의 포인트 클라우드와 렌더링된 뷰를 받아들이고 에셋을 나타내는 implicit functions의 파라미터를 출력합니다. 2. 인코더가 생성한 latents를 바탕으로 diffusion 모델을 훈련하여 neural radiance fields(NeRF) 또는 textured 3D 메시를 생성하여 다운스트림 애플리케이션에서 3D 에셋을 더 쉽게 렌더링하고 사용할 수 있도록 합니다. 이 가이드에서는 Shap-E를 사용하여 나만의 3D 에셋을 생성하는 방법을 보입니다! 시작하기 전에 다음 라이브러리가 설치되어 있는지 확인하세요: ```py # Colab에서 필요한 라이브러리를 설치하기 위해 주석을 제외하세요 #!pip install -q diffusers transformers accelerate trimesh ``` ## Text-to-3D 3D 객체의 gif를 생성하려면 텍스트 프롬프트를 [`ShapEPipeline`]에 전달합니다. 파이프라인은 3D 객체를 생성하는 데 사용되는 이미지 프레임 리스트를 생성합니다. ```py import torch from diffusers import ShapEPipeline device = torch.device("cuda" if torch.cuda.is_available() else "cpu") pipe = ShapEPipeline.from_pretrained("openai/shap-e", torch_dtype=torch.float16, variant="fp16") pipe = pipe.to(device) guidance_scale = 15.0 prompt = ["A firecracker", "A birthday cupcake"] images = pipe( prompt, guidance_scale=guidance_scale, num_inference_steps=64, frame_size=256, ).images ``` 이제 [`~utils.export_to_gif`] 함수를 사용하여 이미지 프레임 리스트를 3D 객체의 gif로 변환합니다. ```py from diffusers.utils import export_to_gif export_to_gif(images[0], "firecracker_3d.gif") export_to_gif(images[1], "cake_3d.gif") ``` <div class="flex gap-4"> <div> <img class="rounded-xl" src="https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/shap_e/firecracker_out.gif"/> <figcaption class="mt-2 text-center text-sm text-gray-500">prompt = "A firecracker"</figcaption> </div> <div> <img class="rounded-xl" src="https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/shap_e/cake_out.gif"/> <figcaption class="mt-2 text-center text-sm text-gray-500">prompt = "A birthday cupcake"</figcaption> </div> </div> ## Image-to-3D 다른 이미지로부터 3D 개체를 생성하려면 [`ShapEImg2ImgPipeline`]을 사용합니다. 기존 이미지를 사용하거나 완전히 새로운 이미지를 생성할 수 있습니다. [Kandinsky 2.1](../api/pipelines/kandinsky) 모델을 사용하여 새 이미지를 생성해 보겠습니다. ```py from diffusers import DiffusionPipeline import torch prior_pipeline = DiffusionPipeline.from_pretrained("kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16, use_safetensors=True).to("cuda") pipeline = DiffusionPipeline.from_pretrained("kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16, use_safetensors=True).to("cuda") prompt = "A cheeseburger, white background" image_embeds, negative_image_embeds = prior_pipeline(prompt, guidance_scale=1.0).to_tuple() image = pipeline( prompt, image_embeds=image_embeds, negative_image_embeds=negative_image_embeds, ).images[0] image.save("burger.png") ``` 치즈버거를 [`ShapEImg2ImgPipeline`]에 전달하여 3D representation을 생성합니다. 
```py from PIL import Image from diffusers import ShapEImg2ImgPipeline from diffusers.utils import export_to_gif pipe = ShapEImg2ImgPipeline.from_pretrained("openai/shap-e-img2img", torch_dtype=torch.float16, variant="fp16").to("cuda") guidance_scale = 3.0 image = Image.open("burger.png").resize((256, 256)) images = pipe( image, guidance_scale=guidance_scale, num_inference_steps=64, frame_size=256, ).images gif_path = export_to_gif(images[0], "burger_3d.gif") ``` <div class="flex gap-4"> <div> <img class="rounded-xl" src="https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/shap_e/burger_in.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">cheeseburger</figcaption> </div> <div> <img class="rounded-xl" src="https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/shap_e/burger_out.gif"/> <figcaption class="mt-2 text-center text-sm text-gray-500">3D cheeseburger</figcaption> </div> </div> ## 메시 생성하기 Shap-E는 다운스트림 애플리케이션에 렌더링할 textured 메시 출력을 생성할 수도 있는 유연한 모델입니다. 이 예제에서는 🤗 Datasets 라이브러리에서 [Dataset viewer](https://huggingface.co/docs/hub/datasets-viewer#dataset-preview)를 사용해 메시 시각화를 지원하는 `glb` 파일로 변환합니다. `output_type` 매개변수를 `"mesh"`로 지정함으로써 [`ShapEPipeline`]과 [`ShapEImg2ImgPipeline`] 모두에 대한 메시 출력을 생성할 수 있습니다: ```py import torch from diffusers import ShapEPipeline device = torch.device("cuda" if torch.cuda.is_available() else "cpu") pipe = ShapEPipeline.from_pretrained("openai/shap-e", torch_dtype=torch.float16, variant="fp16") pipe = pipe.to(device) guidance_scale = 15.0 prompt = "A birthday cupcake" images = pipe(prompt, guidance_scale=guidance_scale, num_inference_steps=64, frame_size=256, output_type="mesh").images ``` 메시 출력을 `ply` 파일로 저장하려면 [`~utils.export_to_ply`] 함수를 사용합니다: <Tip> 선택적으로 [`~utils.export_to_obj`] 함수를 사용하여 메시 출력을 `obj` 파일로 저장할 수 있습니다. 다양한 형식으로 메시 출력을 저장할 수 있어 다운스트림에서 더욱 유연하게 사용할 수 있습니다! </Tip> ```py from diffusers.utils import export_to_ply ply_path = export_to_ply(images[0], "3d_cake.ply") print(f"Saved to folder: {ply_path}") ``` 그 다음 trimesh 라이브러리를 사용하여 `ply` 파일을 `glb` 파일로 변환할 수 있습니다: ```py import trimesh mesh = trimesh.load("3d_cake.ply") mesh_export = mesh.export("3d_cake.glb", file_type="glb") ``` 기본적으로 메시 출력은 아래쪽 시점에 초점이 맞춰져 있지만 회전 변환을 적용하여 기본 시점을 변경할 수 있습니다: ```py import trimesh import numpy as np mesh = trimesh.load("3d_cake.ply") rot = trimesh.transformations.rotation_matrix(-np.pi / 2, [1, 0, 0]) mesh = mesh.apply_transform(rot) mesh_export = mesh.export("3d_cake.glb", file_type="glb") ``` 메시 파일을 데이터셋 레포지토리에 업로드해 Dataset viewer로 시각화하세요! <div class="flex justify-center"> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/3D-cake.gif"/> </div>
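위 단계의 업로드는 예를 들어 `huggingface_hub` 라이브러리를 사용해 대략 아래와 같이 할 수 있습니다 (아래 `repo_id`는 이 가이드에 없는 가상의 예시이므로 본인의 데이터셋 레포지토리로 바꿔야 합니다):

```py
from huggingface_hub import upload_file

# 변환한 glb 파일을 데이터셋 레포지토리에 업로드합니다 (repo_id는 예시)
upload_file(
    path_or_fileobj="3d_cake.glb",
    path_in_repo="3d_cake.glb",
    repo_id="your-username/3d-assets",
    repo_type="dataset",
)
```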
diffusers/docs/source/ko/using-diffusers/shap-e.md/0
{ "file_path": "diffusers/docs/source/ko/using-diffusers/shap-e.md", "repo_id": "diffusers", "token_count": 4198 }
115
<!--- Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # 🧨 Diffusers Examples Diffusers examples are a collection of scripts to demonstrate how to effectively use the `diffusers` library for a variety of use cases involving training or fine-tuning. **Note**: If you are looking for **official** examples on how to use `diffusers` for inference, please have a look at [src/diffusers/pipelines](https://github.com/huggingface/diffusers/tree/main/src/diffusers/pipelines). Our examples aspire to be **self-contained**, **easy-to-tweak**, **beginner-friendly** and for **one-purpose-only**. More specifically, this means: - **Self-contained**: An example script shall only depend on "pip-install-able" Python packages that can be found in a `requirements.txt` file. Example scripts shall **not** depend on any local files. This means that one can simply download an example script, *e.g.* [train_unconditional.py](https://github.com/huggingface/diffusers/blob/main/examples/unconditional_image_generation/train_unconditional.py), install the required dependencies, *e.g.* [requirements.txt](https://github.com/huggingface/diffusers/blob/main/examples/unconditional_image_generation/requirements.txt) and execute the example script. - **Easy-to-tweak**: While we strive to present as many use cases as possible, the example scripts are just that - examples. It is expected that they won't work out-of-the box on your specific problem and that you will be required to change a few lines of code to adapt them to your needs. To help you with that, most of the examples fully expose the preprocessing of the data and the training loop to allow you to tweak and edit them as required. - **Beginner-friendly**: We do not aim for providing state-of-the-art training scripts for the newest models, but rather examples that can be used as a way to better understand diffusion models and how to use them with the `diffusers` library. We often purposefully leave out certain state-of-the-art methods if we consider them too complex for beginners. - **One-purpose-only**: Examples should show one task and one task only. Even if a task is from a modeling point of view very similar, *e.g.* image super-resolution and image modification tend to use the same model and training method, we want examples to showcase only one task to keep them as readable and easy-to-understand as possible. We provide **official** examples that cover the most popular tasks of diffusion models. *Official* examples are **actively** maintained by the `diffusers` maintainers and we try to rigorously follow our example philosophy as defined above. If you feel like another important example should exist, we are more than happy to welcome a [Feature Request](https://github.com/huggingface/diffusers/issues/new?assignees=&labels=&template=feature_request.md&title=) or directly a [Pull Request](https://github.com/huggingface/diffusers/compare) from you! Training examples show how to pretrain or fine-tune diffusion models for a variety of tasks. 
Currently we support: | Task | 🤗 Accelerate | 🤗 Datasets | Colab |---|---|:---:|:---:| | [**Unconditional Image Generation**](./unconditional_image_generation) | ✅ | ✅ | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/training_example.ipynb) | [**Text-to-Image fine-tuning**](./text_to_image) | ✅ | ✅ | | [**Textual Inversion**](./textual_inversion) | ✅ | - | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_textual_inversion_training.ipynb) | [**Dreambooth**](./dreambooth) | ✅ | - | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_dreambooth_training.ipynb) | [**ControlNet**](./controlnet) | ✅ | ✅ | - | [**InstructPix2Pix**](./instruct_pix2pix) | ✅ | ✅ | - | [**Reinforcement Learning for Control**](./reinforcement_learning) | - | - | coming soon. ## Community In addition, we provide **community** examples, which are examples added and maintained by our community. Community examples can consist of both *training* examples or *inference* pipelines. For such examples, we are more lenient regarding the philosophy defined above and also cannot guarantee to provide maintenance for every issue. Examples that are useful for the community, but are either not yet deemed popular or not yet following our above philosophy should go into the [community examples](https://github.com/huggingface/diffusers/tree/main/examples/community) folder. The community folder therefore includes training examples and inference pipelines. **Note**: Community examples can be a [great first contribution](https://github.com/huggingface/diffusers/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22) to show to the community how you like to use `diffusers` 🪄. ## Research Projects We also provide **research_projects** examples that are maintained by the community as defined in the respective research project folders. These examples are useful and offer the extended capabilities which are complementary to the official examples. You may refer to [research_projects](https://github.com/huggingface/diffusers/tree/main/examples/research_projects) for details. ## Important note To make sure you can successfully run the latest versions of the example scripts, you have to **install the library from source** and install some example-specific requirements. To do this, execute the following steps in a new virtual environment: ```bash git clone https://github.com/huggingface/diffusers cd diffusers pip install . ``` Then cd in the example folder of your choice and run ```bash pip install -r requirements.txt ```
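As a rough sketch of what running one of these examples looks like, the unconditional image generation script can be launched with 🤗 Accelerate roughly as follows (the dataset name and flags below are illustrative; check the script's `--help` output for the exact arguments it supports):

```bash
# configure 🤗 Accelerate once for your hardware (number of GPUs, mixed precision, ...)
accelerate config

# launch the example training script; the flags shown here are illustrative
accelerate launch train_unconditional.py \
  --dataset_name="huggan/smithsonian_butterflies_subset" \
  --resolution=64 \
  --train_batch_size=16 \
  --num_epochs=100 \
  --output_dir="ddpm-butterflies-64"
```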
diffusers/examples/README.md/0
{ "file_path": "diffusers/examples/README.md", "repo_id": "diffusers", "token_count": 1764 }
116
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from math import pi from typing import Callable, List, Optional, Tuple, Union import numpy as np import torch from PIL import Image from diffusers import DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNet2DModel from diffusers.utils.torch_utils import randn_tensor class DPSPipeline(DiffusionPipeline): r""" Pipeline for Diffusion Posterior Sampling. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.). Parameters: unet ([`UNet2DModel`]): A `UNet2DModel` to denoise the encoded image latents. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded image. Can be one of [`DDPMScheduler`], or [`DDIMScheduler`]. """ model_cpu_offload_seq = "unet" def __init__(self, unet, scheduler): super().__init__() self.register_modules(unet=unet, scheduler=scheduler) @torch.no_grad() def __call__( self, measurement: torch.Tensor, operator: torch.nn.Module, loss_fn: Callable[[torch.Tensor, torch.Tensor], torch.Tensor], batch_size: int = 1, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, num_inference_steps: int = 1000, output_type: Optional[str] = "pil", return_dict: bool = True, zeta: float = 0.3, ) -> Union[ImagePipelineOutput, Tuple]: r""" The call function to the pipeline for generation. Args: measurement (`torch.Tensor`, *required*): A 'torch.Tensor', the corrupted image operator (`torch.nn.Module`, *required*): A 'torch.nn.Module', the operator generating the corrupted image loss_fn (`Callable[[torch.Tensor, torch.Tensor], torch.Tensor]`, *required*): A 'Callable[[torch.Tensor, torch.Tensor], torch.Tensor]', the loss function used between the measurements, for most of the cases using RMSE is fine. batch_size (`int`, *optional*, defaults to 1): The number of images to generate. generator (`torch.Generator`, *optional*): A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. num_inference_steps (`int`, *optional*, defaults to 1000): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generated image. Choose between `PIL.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. 
Example: ```py >>> from diffusers import DDPMPipeline >>> # load model and scheduler >>> pipe = DDPMPipeline.from_pretrained("google/ddpm-cat-256") >>> # run pipeline in inference (sample random noise and denoise) >>> image = pipe().images[0] >>> # save image >>> image.save("ddpm_generated_image.png") ``` Returns: [`~pipelines.ImagePipelineOutput`] or `tuple`: If `return_dict` is `True`, [`~pipelines.ImagePipelineOutput`] is returned, otherwise a `tuple` is returned where the first element is a list with the generated images """ # Sample gaussian noise to begin loop if isinstance(self.unet.config.sample_size, int): image_shape = ( batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size, ) else: image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size) if self.device.type == "mps": # randn does not work reproducibly on mps image = randn_tensor(image_shape, generator=generator) image = image.to(self.device) else: image = randn_tensor(image_shape, generator=generator, device=self.device) # set step values self.scheduler.set_timesteps(num_inference_steps) for t in self.progress_bar(self.scheduler.timesteps): with torch.enable_grad(): # 1. predict noise model_output image = image.requires_grad_() model_output = self.unet(image, t).sample # 2. compute previous image x'_{t-1} and original prediction x0_{t} scheduler_out = self.scheduler.step(model_output, t, image, generator=generator) image_pred, origi_pred = scheduler_out.prev_sample, scheduler_out.pred_original_sample # 3. compute y'_t = f(x0_{t}) measurement_pred = operator(origi_pred) # 4. compute loss = d(y, y'_t-1) loss = loss_fn(measurement, measurement_pred) loss.backward() print("distance: {0:.4f}".format(loss.item())) with torch.no_grad(): image_pred = image_pred - zeta * image.grad image = image_pred.detach() image = (image / 2 + 0.5).clamp(0, 1) image = image.cpu().permute(0, 2, 3, 1).numpy() if output_type == "pil": image = self.numpy_to_pil(image) if not return_dict: return (image,) return ImagePipelineOutput(images=image) if __name__ == "__main__": import scipy from torch import nn from torchvision.utils import save_image # defining the operators f(.) 
of y = f(x) # super-resolution operator class SuperResolutionOperator(nn.Module): def __init__(self, in_shape, scale_factor): super().__init__() # Resizer local class, do not use outiside the SR operator class class Resizer(nn.Module): def __init__(self, in_shape, scale_factor=None, output_shape=None, kernel=None, antialiasing=True): super(Resizer, self).__init__() # First standardize values and fill missing arguments (if needed) by deriving scale from output shape or vice versa scale_factor, output_shape = self.fix_scale_and_size(in_shape, output_shape, scale_factor) # Choose interpolation method, each method has the matching kernel size def cubic(x): absx = np.abs(x) absx2 = absx**2 absx3 = absx**3 return (1.5 * absx3 - 2.5 * absx2 + 1) * (absx <= 1) + ( -0.5 * absx3 + 2.5 * absx2 - 4 * absx + 2 ) * ((1 < absx) & (absx <= 2)) def lanczos2(x): return ( (np.sin(pi * x) * np.sin(pi * x / 2) + np.finfo(np.float32).eps) / ((pi**2 * x**2 / 2) + np.finfo(np.float32).eps) ) * (abs(x) < 2) def box(x): return ((-0.5 <= x) & (x < 0.5)) * 1.0 def lanczos3(x): return ( (np.sin(pi * x) * np.sin(pi * x / 3) + np.finfo(np.float32).eps) / ((pi**2 * x**2 / 3) + np.finfo(np.float32).eps) ) * (abs(x) < 3) def linear(x): return (x + 1) * ((-1 <= x) & (x < 0)) + (1 - x) * ((0 <= x) & (x <= 1)) method, kernel_width = { "cubic": (cubic, 4.0), "lanczos2": (lanczos2, 4.0), "lanczos3": (lanczos3, 6.0), "box": (box, 1.0), "linear": (linear, 2.0), None: (cubic, 4.0), # set default interpolation method as cubic }.get(kernel) # Antialiasing is only used when downscaling antialiasing *= np.any(np.array(scale_factor) < 1) # Sort indices of dimensions according to scale of each dimension. since we are going dim by dim this is efficient sorted_dims = np.argsort(np.array(scale_factor)) self.sorted_dims = [int(dim) for dim in sorted_dims if scale_factor[dim] != 1] # Iterate over dimensions to calculate local weights for resizing and resize each time in one direction field_of_view_list = [] weights_list = [] for dim in self.sorted_dims: # for each coordinate (along 1 dim), calculate which coordinates in the input image affect its result and the # weights that multiply the values there to get its result. weights, field_of_view = self.contributions( in_shape[dim], output_shape[dim], scale_factor[dim], method, kernel_width, antialiasing ) # convert to torch tensor weights = torch.tensor(weights.T, dtype=torch.float32) # We add singleton dimensions to the weight matrix so we can multiply it with the big tensor we get for # tmp_im[field_of_view.T], (bsxfun style) weights_list.append( nn.Parameter( torch.reshape(weights, list(weights.shape) + (len(scale_factor) - 1) * [1]), requires_grad=False, ) ) field_of_view_list.append( nn.Parameter( torch.tensor(field_of_view.T.astype(np.int32), dtype=torch.long), requires_grad=False ) ) self.field_of_view = nn.ParameterList(field_of_view_list) self.weights = nn.ParameterList(weights_list) def forward(self, in_tensor): x = in_tensor # Use the affecting position values and the set of weights to calculate the result of resizing along this 1 dim for dim, fov, w in zip(self.sorted_dims, self.field_of_view, self.weights): # To be able to act on each dim, we swap so that dim 0 is the wanted dim to resize x = torch.transpose(x, dim, 0) # This is a bit of a complicated multiplication: x[field_of_view.T] is a tensor of order image_dims+1. 
# for each pixel in the output-image it matches the positions the influence it from the input image (along 1 dim # only, this is why it only adds 1 dim to 5the shape). We then multiply, for each pixel, its set of positions with # the matching set of weights. we do this by this big tensor element-wise multiplication (MATLAB bsxfun style: # matching dims are multiplied element-wise while singletons mean that the matching dim is all multiplied by the # same number x = torch.sum(x[fov] * w, dim=0) # Finally we swap back the axes to the original order x = torch.transpose(x, dim, 0) return x def fix_scale_and_size(self, input_shape, output_shape, scale_factor): # First fixing the scale-factor (if given) to be standardized the function expects (a list of scale factors in the # same size as the number of input dimensions) if scale_factor is not None: # By default, if scale-factor is a scalar we assume 2d resizing and duplicate it. if np.isscalar(scale_factor) and len(input_shape) > 1: scale_factor = [scale_factor, scale_factor] # We extend the size of scale-factor list to the size of the input by assigning 1 to all the unspecified scales scale_factor = list(scale_factor) scale_factor = [1] * (len(input_shape) - len(scale_factor)) + scale_factor # Fixing output-shape (if given): extending it to the size of the input-shape, by assigning the original input-size # to all the unspecified dimensions if output_shape is not None: output_shape = list(input_shape[len(output_shape) :]) + list(np.uint(np.array(output_shape))) # Dealing with the case of non-give scale-factor, calculating according to output-shape. note that this is # sub-optimal, because there can be different scales to the same output-shape. if scale_factor is None: scale_factor = 1.0 * np.array(output_shape) / np.array(input_shape) # Dealing with missing output-shape. calculating according to scale-factor if output_shape is None: output_shape = np.uint(np.ceil(np.array(input_shape) * np.array(scale_factor))) return scale_factor, output_shape def contributions(self, in_length, out_length, scale, kernel, kernel_width, antialiasing): # This function calculates a set of 'filters' and a set of field_of_view that will later on be applied # such that each position from the field_of_view will be multiplied with a matching filter from the # 'weights' based on the interpolation method and the distance of the sub-pixel location from the pixel centers # around it. This is only done for one dimension of the image. # When anti-aliasing is activated (default and only for downscaling) the receptive field is stretched to size of # 1/sf. this means filtering is more 'low-pass filter'. fixed_kernel = (lambda arg: scale * kernel(scale * arg)) if antialiasing else kernel kernel_width *= 1.0 / scale if antialiasing else 1.0 # These are the coordinates of the output image out_coordinates = np.arange(1, out_length + 1) # since both scale-factor and output size can be provided simulatneously, perserving the center of the image requires shifting # the output coordinates. the deviation is because out_length doesn't necesary equal in_length*scale. # to keep the center we need to subtract half of this deivation so that we get equal margins for boths sides and center is preserved. shifted_out_coordinates = out_coordinates - (out_length - in_length * scale) / 2 # These are the matching positions of the output-coordinates on the input image coordinates. 
# Best explained by example: say we have 4 horizontal pixels for HR and we downscale by SF=2 and get 2 pixels: # [1,2,3,4] -> [1,2]. Remember each pixel number is the middle of the pixel. # The scaling is done between the distances and not pixel numbers (the right boundary of pixel 4 is transformed to # the right boundary of pixel 2. pixel 1 in the small image matches the boundary between pixels 1 and 2 in the big # one and not to pixel 2. This means the position is not just multiplication of the old pos by scale-factor). # So if we measure distance from the left border, middle of pixel 1 is at distance d=0.5, border between 1 and 2 is # at d=1, and so on (d = p - 0.5). we calculate (d_new = d_old / sf) which means: # (p_new-0.5 = (p_old-0.5) / sf) -> p_new = p_old/sf + 0.5 * (1-1/sf) match_coordinates = shifted_out_coordinates / scale + 0.5 * (1 - 1 / scale) # This is the left boundary to start multiplying the filter from, it depends on the size of the filter left_boundary = np.floor(match_coordinates - kernel_width / 2) # Kernel width needs to be enlarged because when covering has sub-pixel borders, it must 'see' the pixel centers # of the pixels it only covered a part from. So we add one pixel at each side to consider (weights can zeroize them) expanded_kernel_width = np.ceil(kernel_width) + 2 # Determine a set of field_of_view for each each output position, these are the pixels in the input image # that the pixel in the output image 'sees'. We get a matrix whos horizontal dim is the output pixels (big) and the # vertical dim is the pixels it 'sees' (kernel_size + 2) field_of_view = np.squeeze( np.int16(np.expand_dims(left_boundary, axis=1) + np.arange(expanded_kernel_width) - 1) ) # Assign weight to each pixel in the field of view. A matrix whos horizontal dim is the output pixels and the # vertical dim is a list of weights matching to the pixel in the field of view (that are specified in # 'field_of_view') weights = fixed_kernel(1.0 * np.expand_dims(match_coordinates, axis=1) - field_of_view - 1) # Normalize weights to sum up to 1. 
be careful from dividing by 0 sum_weights = np.sum(weights, axis=1) sum_weights[sum_weights == 0] = 1.0 weights = 1.0 * weights / np.expand_dims(sum_weights, axis=1) # We use this mirror structure as a trick for reflection padding at the boundaries mirror = np.uint(np.concatenate((np.arange(in_length), np.arange(in_length - 1, -1, step=-1)))) field_of_view = mirror[np.mod(field_of_view, mirror.shape[0])] # Get rid of weights and pixel positions that are of zero weight non_zero_out_pixels = np.nonzero(np.any(weights, axis=0)) weights = np.squeeze(weights[:, non_zero_out_pixels]) field_of_view = np.squeeze(field_of_view[:, non_zero_out_pixels]) # Final products are the relative positions and the matching weights, both are output_size X fixed_kernel_size return weights, field_of_view self.down_sample = Resizer(in_shape, 1 / scale_factor) for param in self.parameters(): param.requires_grad = False def forward(self, data, **kwargs): return self.down_sample(data) # Gaussian blurring operator class GaussialBlurOperator(nn.Module): def __init__(self, kernel_size, intensity): super().__init__() class Blurkernel(nn.Module): def __init__(self, blur_type="gaussian", kernel_size=31, std=3.0): super().__init__() self.blur_type = blur_type self.kernel_size = kernel_size self.std = std self.seq = nn.Sequential( nn.ReflectionPad2d(self.kernel_size // 2), nn.Conv2d(3, 3, self.kernel_size, stride=1, padding=0, bias=False, groups=3), ) self.weights_init() def forward(self, x): return self.seq(x) def weights_init(self): if self.blur_type == "gaussian": n = np.zeros((self.kernel_size, self.kernel_size)) n[self.kernel_size // 2, self.kernel_size // 2] = 1 k = scipy.ndimage.gaussian_filter(n, sigma=self.std) k = torch.from_numpy(k) self.k = k for name, f in self.named_parameters(): f.data.copy_(k) def update_weights(self, k): if not torch.is_tensor(k): k = torch.from_numpy(k) for name, f in self.named_parameters(): f.data.copy_(k) def get_kernel(self): return self.k self.kernel_size = kernel_size self.conv = Blurkernel(blur_type="gaussian", kernel_size=kernel_size, std=intensity) self.kernel = self.conv.get_kernel() self.conv.update_weights(self.kernel.type(torch.float32)) for param in self.parameters(): param.requires_grad = False def forward(self, data, **kwargs): return self.conv(data) def transpose(self, data, **kwargs): return data def get_kernel(self): return self.kernel.view(1, 1, self.kernel_size, self.kernel_size) # assuming the forward process y = f(x) is polluted by Gaussian noise, use l2 norm def RMSELoss(yhat, y): return torch.sqrt(torch.sum((yhat - y) ** 2)) # set up source image src = Image.open("sample.png") # read image into [1,3,H,W] src = torch.from_numpy(np.array(src, dtype=np.float32)).permute(2, 0, 1)[None] # normalize image to [-1,1] src = (src / 127.5) - 1.0 src = src.to("cuda") # set up operator and measurement # operator = SuperResolutionOperator(in_shape=src.shape, scale_factor=4).to("cuda") operator = GaussialBlurOperator(kernel_size=61, intensity=3.0).to("cuda") measurement = operator(src) # set up scheduler scheduler = DDPMScheduler.from_pretrained("google/ddpm-celebahq-256") scheduler.set_timesteps(1000) # set up model model = UNet2DModel.from_pretrained("google/ddpm-celebahq-256").to("cuda") save_image((src + 1.0) / 2.0, "dps_src.png") save_image((measurement + 1.0) / 2.0, "dps_mea.png") # finally, the pipeline dpspipe = DPSPipeline(model, scheduler) image = dpspipe( measurement=measurement, operator=operator, loss_fn=RMSELoss, zeta=1.0, ).images[0] 
image.save("dps_generated_image.png")
diffusers/examples/community/dps_pipeline.py/0
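The file below defines `StableDiffusionLongPromptWeightingPipeline`, a Stable Diffusion pipeline that accepts weighted prompts of arbitrary length. As a minimal, hedged usage sketch (not taken from the file itself; it assumes the class is importable from wherever the file is saved and that a CUDA device plus the CompVis/stable-diffusion-v1-4 checkpoint are available), the pipeline can be assembled from the components of a standard Stable Diffusion checkpoint and called with the bracketed attention syntax documented by `parse_prompt_attention`:

# Minimal usage sketch for the long-prompt-weighting pipeline defined below.
# Assumptions (not from the source file): the class is importable as
# `StableDiffusionLongPromptWeightingPipeline`, a CUDA device is available, and
# "CompVis/stable-diffusion-v1-4" serves as the base checkpoint.
import torch
from diffusers import StableDiffusionPipeline

base = StableDiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16
)
pipe = StableDiffusionLongPromptWeightingPipeline(
    vae=base.vae,
    text_encoder=base.text_encoder,
    tokenizer=base.tokenizer,
    unet=base.unet,
    scheduler=base.scheduler,
    safety_checker=base.safety_checker,
    feature_extractor=base.feature_extractor,
).to("cuda")

# Brackets adjust per-token attention: (word) boosts by 1.1, [word] attenuates by 1/1.1,
# and (word:1.5) sets an explicit multiplier; prompts may exceed the usual 77-token limit.
image = pipe.text2img(
    "a (photorealistic:1.3) portrait of an astronaut riding a horse, [blurry], [low quality]",
    negative_prompt="watermark, text",
    num_inference_steps=30,
).images[0]
image.save("lpw_text2img.png")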
{ "file_path": "diffusers/examples/community/dps_pipeline.py", "repo_id": "diffusers", "token_count": 11140 }
117
import inspect import re from typing import Any, Callable, Dict, List, Optional, Union import numpy as np import PIL.Image import torch from packaging import version from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from diffusers import DiffusionPipeline from diffusers.configuration_utils import FrozenDict from diffusers.image_processor import VaeImageProcessor from diffusers.loaders import FromSingleFileMixin, StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin from diffusers.models import AutoencoderKL, UNet2DConditionModel from diffusers.models.lora import adjust_lora_scale_text_encoder from diffusers.pipelines.pipeline_utils import StableDiffusionMixin from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput, StableDiffusionSafetyChecker from diffusers.schedulers import KarrasDiffusionSchedulers from diffusers.utils import ( PIL_INTERPOLATION, USE_PEFT_BACKEND, deprecate, logging, scale_lora_layers, unscale_lora_layers, ) from diffusers.utils.torch_utils import randn_tensor # ------------------------------------------------------------------------------ logger = logging.get_logger(__name__) # pylint: disable=invalid-name re_attention = re.compile( r""" \\\(| \\\)| \\\[| \\]| \\\\| \\| \(| \[| :([+-]?[.\d]+)\)| \)| ]| [^\\()\[\]:]+| : """, re.X, ) def parse_prompt_attention(text): """ Parses a string with attention tokens and returns a list of pairs: text and its associated weight. Accepted tokens are: (abc) - increases attention to abc by a multiplier of 1.1 (abc:3.12) - increases attention to abc by a multiplier of 3.12 [abc] - decreases attention to abc by a multiplier of 1.1 \\( - literal character '(' \\[ - literal character '[' \\) - literal character ')' \\] - literal character ']' \\ - literal character '\' anything else - just text >>> parse_prompt_attention('normal text') [['normal text', 1.0]] >>> parse_prompt_attention('an (important) word') [['an ', 1.0], ['important', 1.1], [' word', 1.0]] >>> parse_prompt_attention('(unbalanced') [['unbalanced', 1.1]] >>> parse_prompt_attention('\\(literal\\]') [['(literal]', 1.0]] >>> parse_prompt_attention('(unnecessary)(parens)') [['unnecessaryparens', 1.1]] >>> parse_prompt_attention('a (((house:1.3)) [on] a (hill:0.5), sun, (((sky))).') [['a ', 1.0], ['house', 1.5730000000000004], [' ', 1.1], ['on', 1.0], [' a ', 1.1], ['hill', 0.55], [', sun, ', 1.1], ['sky', 1.4641000000000006], ['.', 1.1]] """ res = [] round_brackets = [] square_brackets = [] round_bracket_multiplier = 1.1 square_bracket_multiplier = 1 / 1.1 def multiply_range(start_position, multiplier): for p in range(start_position, len(res)): res[p][1] *= multiplier for m in re_attention.finditer(text): text = m.group(0) weight = m.group(1) if text.startswith("\\"): res.append([text[1:], 1.0]) elif text == "(": round_brackets.append(len(res)) elif text == "[": square_brackets.append(len(res)) elif weight is not None and len(round_brackets) > 0: multiply_range(round_brackets.pop(), float(weight)) elif text == ")" and len(round_brackets) > 0: multiply_range(round_brackets.pop(), round_bracket_multiplier) elif text == "]" and len(square_brackets) > 0: multiply_range(square_brackets.pop(), square_bracket_multiplier) else: res.append([text, 1.0]) for pos in round_brackets: multiply_range(pos, round_bracket_multiplier) for pos in square_brackets: multiply_range(pos, square_bracket_multiplier) if len(res) == 0: res = [["", 1.0]] # merge runs of identical weights i = 0 while i + 1 < len(res): if res[i][1] == res[i + 1][1]: 
res[i][0] += res[i + 1][0] res.pop(i + 1) else: i += 1 return res def get_prompts_with_weights(pipe: DiffusionPipeline, prompt: List[str], max_length: int): r""" Tokenize a list of prompts and return its tokens with weights of each token. No padding, starting or ending token is included. """ tokens = [] weights = [] truncated = False for text in prompt: texts_and_weights = parse_prompt_attention(text) text_token = [] text_weight = [] for word, weight in texts_and_weights: # tokenize and discard the starting and the ending token token = pipe.tokenizer(word).input_ids[1:-1] text_token += token # copy the weight by length of token text_weight += [weight] * len(token) # stop if the text is too long (longer than truncation limit) if len(text_token) > max_length: truncated = True break # truncate if len(text_token) > max_length: truncated = True text_token = text_token[:max_length] text_weight = text_weight[:max_length] tokens.append(text_token) weights.append(text_weight) if truncated: logger.warning("Prompt was truncated. Try to shorten the prompt or increase max_embeddings_multiples") return tokens, weights def pad_tokens_and_weights(tokens, weights, max_length, bos, eos, pad, no_boseos_middle=True, chunk_length=77): r""" Pad the tokens (with starting and ending tokens) and weights (with 1.0) to max_length. """ max_embeddings_multiples = (max_length - 2) // (chunk_length - 2) weights_length = max_length if no_boseos_middle else max_embeddings_multiples * chunk_length for i in range(len(tokens)): tokens[i] = [bos] + tokens[i] + [pad] * (max_length - 1 - len(tokens[i]) - 1) + [eos] if no_boseos_middle: weights[i] = [1.0] + weights[i] + [1.0] * (max_length - 1 - len(weights[i])) else: w = [] if len(weights[i]) == 0: w = [1.0] * weights_length else: for j in range(max_embeddings_multiples): w.append(1.0) # weight for starting token in this chunk w += weights[i][j * (chunk_length - 2) : min(len(weights[i]), (j + 1) * (chunk_length - 2))] w.append(1.0) # weight for ending token in this chunk w += [1.0] * (weights_length - len(w)) weights[i] = w[:] return tokens, weights def get_unweighted_text_embeddings( pipe: DiffusionPipeline, text_input: torch.Tensor, chunk_length: int, no_boseos_middle: Optional[bool] = True, clip_skip: Optional[int] = None, ): """ When the length of tokens is a multiple of the capacity of the text encoder, it should be split into chunks and sent to the text encoder individually. """ max_embeddings_multiples = (text_input.shape[1] - 2) // (chunk_length - 2) if max_embeddings_multiples > 1: text_embeddings = [] for i in range(max_embeddings_multiples): # extract the i-th chunk text_input_chunk = text_input[:, i * (chunk_length - 2) : (i + 1) * (chunk_length - 2) + 2].clone() # cover the head and the tail by the starting and the ending tokens text_input_chunk[:, 0] = text_input[0, 0] text_input_chunk[:, -1] = text_input[0, -1] if clip_skip is None: prompt_embeds = pipe.text_encoder(text_input_chunk.to(pipe.device)) text_embedding = prompt_embeds[0] else: prompt_embeds = pipe.text_encoder(text_input_chunk.to(pipe.device), output_hidden_states=True) # Access the `hidden_states` first, that contains a tuple of # all the hidden states from the encoder layers. Then index into # the tuple to access the hidden states from the desired layer. prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] # We also need to apply the final LayerNorm here to not mess with the # representations. 
The `last_hidden_states` that we typically use for # obtaining the final prompt representations passes through the LayerNorm # layer. text_embedding = pipe.text_encoder.text_model.final_layer_norm(prompt_embeds) if no_boseos_middle: if i == 0: # discard the ending token text_embedding = text_embedding[:, :-1] elif i == max_embeddings_multiples - 1: # discard the starting token text_embedding = text_embedding[:, 1:] else: # discard both starting and ending tokens text_embedding = text_embedding[:, 1:-1] text_embeddings.append(text_embedding) text_embeddings = torch.concat(text_embeddings, axis=1) else: if clip_skip is None: clip_skip = 0 prompt_embeds = pipe.text_encoder(text_input, output_hidden_states=True)[-1][-(clip_skip + 1)] text_embeddings = pipe.text_encoder.text_model.final_layer_norm(prompt_embeds) return text_embeddings def get_weighted_text_embeddings( pipe: DiffusionPipeline, prompt: Union[str, List[str]], uncond_prompt: Optional[Union[str, List[str]]] = None, max_embeddings_multiples: Optional[int] = 3, no_boseos_middle: Optional[bool] = False, skip_parsing: Optional[bool] = False, skip_weighting: Optional[bool] = False, clip_skip=None, lora_scale=None, ): r""" Prompts can be assigned with local weights using brackets. For example, prompt 'A (very beautiful) masterpiece' highlights the words 'very beautiful', and the embedding tokens corresponding to the words get multiplied by a constant, 1.1. Also, to regularize of the embedding, the weighted embedding would be scaled to preserve the original mean. Args: pipe (`DiffusionPipeline`): Pipe to provide access to the tokenizer and the text encoder. prompt (`str` or `List[str]`): The prompt or prompts to guide the image generation. uncond_prompt (`str` or `List[str]`): The unconditional prompt or prompts for guide the image generation. If unconditional prompt is provided, the embeddings of prompt and uncond_prompt are concatenated. max_embeddings_multiples (`int`, *optional*, defaults to `3`): The max multiple length of prompt embeddings compared to the max output length of text encoder. no_boseos_middle (`bool`, *optional*, defaults to `False`): If the length of text token is multiples of the capacity of text encoder, whether reserve the starting and ending token in each of the chunk in the middle. skip_parsing (`bool`, *optional*, defaults to `False`): Skip the parsing of brackets. skip_weighting (`bool`, *optional*, defaults to `False`): Skip the weighting. When the parsing is skipped, it is forced True. 
""" # set lora scale so that monkey patched LoRA # function of text encoder can correctly access it if lora_scale is not None and isinstance(pipe, StableDiffusionLoraLoaderMixin): pipe._lora_scale = lora_scale # dynamically adjust the LoRA scale if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(pipe.text_encoder, lora_scale) else: scale_lora_layers(pipe.text_encoder, lora_scale) max_length = (pipe.tokenizer.model_max_length - 2) * max_embeddings_multiples + 2 if isinstance(prompt, str): prompt = [prompt] if not skip_parsing: prompt_tokens, prompt_weights = get_prompts_with_weights(pipe, prompt, max_length - 2) if uncond_prompt is not None: if isinstance(uncond_prompt, str): uncond_prompt = [uncond_prompt] uncond_tokens, uncond_weights = get_prompts_with_weights(pipe, uncond_prompt, max_length - 2) else: prompt_tokens = [ token[1:-1] for token in pipe.tokenizer(prompt, max_length=max_length, truncation=True).input_ids ] prompt_weights = [[1.0] * len(token) for token in prompt_tokens] if uncond_prompt is not None: if isinstance(uncond_prompt, str): uncond_prompt = [uncond_prompt] uncond_tokens = [ token[1:-1] for token in pipe.tokenizer(uncond_prompt, max_length=max_length, truncation=True).input_ids ] uncond_weights = [[1.0] * len(token) for token in uncond_tokens] # round up the longest length of tokens to a multiple of (model_max_length - 2) max_length = max([len(token) for token in prompt_tokens]) if uncond_prompt is not None: max_length = max(max_length, max([len(token) for token in uncond_tokens])) max_embeddings_multiples = min( max_embeddings_multiples, (max_length - 1) // (pipe.tokenizer.model_max_length - 2) + 1, ) max_embeddings_multiples = max(1, max_embeddings_multiples) max_length = (pipe.tokenizer.model_max_length - 2) * max_embeddings_multiples + 2 # pad the length of tokens and weights bos = pipe.tokenizer.bos_token_id eos = pipe.tokenizer.eos_token_id pad = getattr(pipe.tokenizer, "pad_token_id", eos) prompt_tokens, prompt_weights = pad_tokens_and_weights( prompt_tokens, prompt_weights, max_length, bos, eos, pad, no_boseos_middle=no_boseos_middle, chunk_length=pipe.tokenizer.model_max_length, ) prompt_tokens = torch.tensor(prompt_tokens, dtype=torch.long, device=pipe.device) if uncond_prompt is not None: uncond_tokens, uncond_weights = pad_tokens_and_weights( uncond_tokens, uncond_weights, max_length, bos, eos, pad, no_boseos_middle=no_boseos_middle, chunk_length=pipe.tokenizer.model_max_length, ) uncond_tokens = torch.tensor(uncond_tokens, dtype=torch.long, device=pipe.device) # get the embeddings text_embeddings = get_unweighted_text_embeddings( pipe, prompt_tokens, pipe.tokenizer.model_max_length, no_boseos_middle=no_boseos_middle, clip_skip=clip_skip ) prompt_weights = torch.tensor(prompt_weights, dtype=text_embeddings.dtype, device=text_embeddings.device) if uncond_prompt is not None: uncond_embeddings = get_unweighted_text_embeddings( pipe, uncond_tokens, pipe.tokenizer.model_max_length, no_boseos_middle=no_boseos_middle, clip_skip=clip_skip, ) uncond_weights = torch.tensor(uncond_weights, dtype=uncond_embeddings.dtype, device=uncond_embeddings.device) # assign weights to the prompts and normalize in the sense of mean # TODO: should we normalize by chunk or in a whole (current implementation)? 
if (not skip_parsing) and (not skip_weighting): previous_mean = text_embeddings.float().mean(axis=[-2, -1]).to(text_embeddings.dtype) text_embeddings *= prompt_weights.unsqueeze(-1) current_mean = text_embeddings.float().mean(axis=[-2, -1]).to(text_embeddings.dtype) text_embeddings *= (previous_mean / current_mean).unsqueeze(-1).unsqueeze(-1) if uncond_prompt is not None: previous_mean = uncond_embeddings.float().mean(axis=[-2, -1]).to(uncond_embeddings.dtype) uncond_embeddings *= uncond_weights.unsqueeze(-1) current_mean = uncond_embeddings.float().mean(axis=[-2, -1]).to(uncond_embeddings.dtype) uncond_embeddings *= (previous_mean / current_mean).unsqueeze(-1).unsqueeze(-1) if pipe.text_encoder is not None: if isinstance(pipe, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND: # Retrieve the original scale by scaling back the LoRA layers unscale_lora_layers(pipe.text_encoder, lora_scale) if uncond_prompt is not None: return text_embeddings, uncond_embeddings return text_embeddings, None def preprocess_image(image, batch_size): w, h = image.size w, h = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8 image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]) image = np.array(image).astype(np.float32) / 255.0 image = np.vstack([image[None].transpose(0, 3, 1, 2)] * batch_size) image = torch.from_numpy(image) return 2.0 * image - 1.0 def preprocess_mask(mask, batch_size, scale_factor=8): if not isinstance(mask, torch.Tensor): mask = mask.convert("L") w, h = mask.size w, h = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8 mask = mask.resize((w // scale_factor, h // scale_factor), resample=PIL_INTERPOLATION["nearest"]) mask = np.array(mask).astype(np.float32) / 255.0 mask = np.tile(mask, (4, 1, 1)) mask = np.vstack([mask[None]] * batch_size) mask = 1 - mask # repaint white, keep black mask = torch.from_numpy(mask) return mask else: valid_mask_channel_sizes = [1, 3] # if mask channel is fourth tensor dimension, permute dimensions to pytorch standard (B, C, H, W) if mask.shape[3] in valid_mask_channel_sizes: mask = mask.permute(0, 3, 1, 2) elif mask.shape[1] not in valid_mask_channel_sizes: raise ValueError( f"Mask channel dimension of size in {valid_mask_channel_sizes} should be second or fourth dimension," f" but received mask of shape {tuple(mask.shape)}" ) # (potentially) reduce mask channel dimension from 3 to 1 for broadcasting to latent shape mask = mask.mean(dim=1, keepdim=True) h, w = mask.shape[-2:] h, w = (x - x % 8 for x in (h, w)) # resize to integer multiple of 8 mask = torch.nn.functional.interpolate(mask, (h // scale_factor, w // scale_factor)) return mask class StableDiffusionLongPromptWeightingPipeline( DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, StableDiffusionLoraLoaderMixin, FromSingleFileMixin, ): r""" Pipeline for text-to-image generation using Stable Diffusion without tokens length limit, and support parsing weighting in prompt. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) Args: vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. text_encoder ([`CLIPTextModel`]): Frozen text-encoder. 
Stable Diffusion uses the text portion of [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. tokenizer (`CLIPTokenizer`): Tokenizer of class [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. safety_checker ([`StableDiffusionSafetyChecker`]): Classification module that estimates whether generated images could be considered offensive or harmful. Please, refer to the [model card](https://huggingface.co/CompVis/stable-diffusion-v1-4) for details. feature_extractor ([`CLIPImageProcessor`]): Model that extracts features from generated images to be used as inputs for the `safety_checker`. """ model_cpu_offload_seq = "text_encoder-->unet->vae" _optional_components = ["safety_checker", "feature_extractor"] _exclude_from_cpu_offload = ["safety_checker"] def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: KarrasDiffusionSchedulers, safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, requires_safety_checker: bool = True, ): super().__init__() if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1: deprecation_message = ( f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`" f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure " "to update the config accordingly as leaving `steps_offset` might led to incorrect results" " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub," " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`" " file" ) deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config["steps_offset"] = 1 scheduler._internal_dict = FrozenDict(new_config) if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True: deprecation_message = ( f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`." " `clip_sample` should be set to False in the configuration file. Please make sure to update the" " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in" " future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very" " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file" ) deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config["clip_sample"] = False scheduler._internal_dict = FrozenDict(new_config) if safety_checker is None and requires_safety_checker: logger.warning( f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" " results in services or applications open to the public. 
Both the diffusers team and Hugging Face" " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" " it only for use-cases that involve analyzing network behavior or auditing its results. For more" " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." ) if safety_checker is not None and feature_extractor is None: raise ValueError( "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." ) is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse( version.parse(unet.config._diffusers_version).base_version ) < version.parse("0.9.0.dev0") is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64 if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64: deprecation_message = ( "The configuration file of the unet has set the default `sample_size` to smaller than" " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the" " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-" " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5" " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the" " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`" " in the config might lead to incorrect results in future versions. If you have downloaded this" " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for" " the `unet/config.json` file" ) deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(unet.config) new_config["sample_size"] = 64 unet._internal_dict = FrozenDict(new_config) self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, ) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) self.register_to_config( requires_safety_checker=requires_safety_checker, ) def _encode_prompt( self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, max_embeddings_multiples=3, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, clip_skip: Optional[int] = None, lora_scale: Optional[float] = None, ): r""" Encodes the prompt into text encoder hidden states. Args: prompt (`str` or `list(int)`): prompt to be encoded device: (`torch.device`): torch device num_images_per_prompt (`int`): number of images that should be generated per prompt do_classifier_free_guidance (`bool`): whether to use classifier free guidance or not negative_prompt (`str` or `List[str]`): The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). max_embeddings_multiples (`int`, *optional*, defaults to `3`): The max multiple length of prompt embeddings compared to the max output length of text encoder. 
""" if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if negative_prompt_embeds is None: if negative_prompt is None: negative_prompt = [""] * batch_size elif isinstance(negative_prompt, str): negative_prompt = [negative_prompt] * batch_size if batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) if prompt_embeds is None or negative_prompt_embeds is None: if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, self.tokenizer) if do_classifier_free_guidance and negative_prompt_embeds is None: negative_prompt = self.maybe_convert_prompt(negative_prompt, self.tokenizer) prompt_embeds1, negative_prompt_embeds1 = get_weighted_text_embeddings( pipe=self, prompt=prompt, uncond_prompt=negative_prompt if do_classifier_free_guidance else None, max_embeddings_multiples=max_embeddings_multiples, clip_skip=clip_skip, lora_scale=lora_scale, ) if prompt_embeds is None: prompt_embeds = prompt_embeds1 if negative_prompt_embeds is None: negative_prompt_embeds = negative_prompt_embeds1 bs_embed, seq_len, _ = prompt_embeds.shape # duplicate text embeddings for each generation per prompt, using mps friendly method prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) if do_classifier_free_guidance: bs_embed, seq_len, _ = negative_prompt_embeds.shape negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) return prompt_embeds def check_inputs( self, prompt, height, width, strength, callback_steps, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None, ): if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") if strength < 0 or strength > 1: raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}") if (callback_steps is None) or ( callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) ): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." ) if prompt is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt is None and prompt_embeds is None: raise ValueError( "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." ) elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" f" {negative_prompt_embeds}. Please make sure to only forward one of the two." 
) if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError( "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" f" {negative_prompt_embeds.shape}." ) def get_timesteps(self, num_inference_steps, strength, device, is_text2img): if is_text2img: return self.scheduler.timesteps.to(device), num_inference_steps else: # get the original timestep using init_timestep init_timestep = min(int(num_inference_steps * strength), num_inference_steps) t_start = max(num_inference_steps - init_timestep, 0) timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :] return timesteps, num_inference_steps - t_start def run_safety_checker(self, image, device, dtype): if self.safety_checker is not None: safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(device) image, has_nsfw_concept = self.safety_checker( images=image, clip_input=safety_checker_input.pixel_values.to(dtype) ) else: has_nsfw_concept = None return image, has_nsfw_concept def decode_latents(self, latents): latents = 1 / self.vae.config.scaling_factor * latents image = self.vae.decode(latents).sample image = (image / 2 + 0.5).clamp(0, 1) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 image = image.cpu().permute(0, 2, 3, 1).float().numpy() return image def prepare_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs def prepare_latents( self, image, timestep, num_images_per_prompt, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None, ): if image is None: batch_size = batch_size * num_images_per_prompt shape = ( batch_size, num_channels_latents, int(height) // self.vae_scale_factor, int(width) // self.vae_scale_factor, ) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
) if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device) # scale the initial noise by the standard deviation required by the scheduler latents = latents * self.scheduler.init_noise_sigma return latents, None, None else: image = image.to(device=self.device, dtype=dtype) init_latent_dist = self.vae.encode(image).latent_dist init_latents = init_latent_dist.sample(generator=generator) init_latents = self.vae.config.scaling_factor * init_latents # Expand init_latents for batch_size and num_images_per_prompt init_latents = torch.cat([init_latents] * num_images_per_prompt, dim=0) init_latents_orig = init_latents # add noise to latents using the timesteps noise = randn_tensor(init_latents.shape, generator=generator, device=self.device, dtype=dtype) init_latents = self.scheduler.add_noise(init_latents, noise, timestep) latents = init_latents return latents, init_latents_orig, noise @torch.no_grad() def __call__( self, prompt: Union[str, List[str]], negative_prompt: Optional[Union[str, List[str]]] = None, image: Union[torch.Tensor, PIL.Image.Image] = None, mask_image: Union[torch.Tensor, PIL.Image.Image] = None, height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, strength: float = 0.8, num_images_per_prompt: Optional[int] = 1, add_predicted_noise: Optional[bool] = False, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.Tensor] = None, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, max_embeddings_multiples: Optional[int] = 3, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, is_cancelled_callback: Optional[Callable[[], bool]] = None, clip_skip: Optional[int] = None, callback_steps: int = 1, cross_attention_kwargs: Optional[Dict[str, Any]] = None, ): r""" Function invoked when calling the pipeline for generation. Args: prompt (`str` or `List[str]`): The prompt or prompts to guide the image generation. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). image (`torch.Tensor` or `PIL.Image.Image`): `Image`, or tensor representing an image batch, that will be used as the starting point for the process. mask_image (`torch.Tensor` or `PIL.Image.Image`): `Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be replaced by noise and therefore repainted, while black pixels will be preserved. If `mask_image` is a PIL image, it will be converted to a single channel (luminance) before use. If it's a tensor, it should contain one color channel (L) instead of 3, so the expected shape would be `(B, H, W, 1)`. height (`int`, *optional*, defaults to 512): The height in pixels of the generated image. width (`int`, *optional*, defaults to 512): The width in pixels of the generated image. num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. guidance_scale (`float`, *optional*, defaults to 7.5): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). `guidance_scale` is defined as `w` of equation 2. 
of [Imagen Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. strength (`float`, *optional*, defaults to 0.8): Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1. `image` will be used as a starting point, adding more noise to it the larger the `strength`. The number of denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise will be maximum and the denoising process will run for the full number of iterations specified in `num_inference_steps`. A value of 1, therefore, essentially ignores `image`. num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. add_predicted_noise (`bool`, *optional*, defaults to True): Use predicted noise instead of random noise when constructing noisy versions of the original image in the reverse diffusion process eta (`float`, *optional*, defaults to 0.0): Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to [`schedulers.DDIMScheduler`], will be ignored for others. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor will ge generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. max_embeddings_multiples (`int`, *optional*, defaults to `3`): The max multiple length of prompt embeddings compared to the max output length of text encoder. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a plain tuple. callback (`Callable`, *optional*): A function that will be called every `callback_steps` steps during inference. The function will be called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. is_cancelled_callback (`Callable`, *optional*): A function that will be called every `callback_steps` steps during inference. If the function returns `True`, the inference will be cancelled. clip_skip (`int`, *optional*): Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that the output of the pre-final layer will be used for computing the prompt embeddings. 
callback_steps (`int`, *optional*, defaults to 1): The frequency at which the `callback` function will be called. If not specified, the callback will be called at every step. cross_attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under `self.processor` in [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). Returns: `None` if cancelled by `is_cancelled_callback`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple. When returning a tuple, the first element is a list with the generated images, and the second element is a list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work" (nsfw) content, according to the `safety_checker`. """ # 0. Default height and width to unet height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor # 1. Check inputs. Raise error if not correct self.check_inputs( prompt, height, width, strength, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds ) # 2. Define call parameters if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. do_classifier_free_guidance = guidance_scale > 1.0 lora_scale = cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None # 3. Encode input prompt prompt_embeds = self._encode_prompt( prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt, max_embeddings_multiples, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, clip_skip=clip_skip, lora_scale=lora_scale, ) dtype = prompt_embeds.dtype # 4. Preprocess image and mask if isinstance(image, PIL.Image.Image): image = preprocess_image(image, batch_size) if image is not None: image = image.to(device=self.device, dtype=dtype) if isinstance(mask_image, PIL.Image.Image): mask_image = preprocess_mask(mask_image, batch_size, self.vae_scale_factor) if mask_image is not None: mask = mask_image.to(device=self.device, dtype=dtype) mask = torch.cat([mask] * num_images_per_prompt) else: mask = None # 5. set timesteps self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device, image is None) latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) # 6. Prepare latent variables latents, init_latents_orig, noise = self.prepare_latents( image, latent_timestep, num_images_per_prompt, batch_size, self.unet.config.in_channels, height, width, dtype, device, generator, latents, ) # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) # 8. 
Denoising loop num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) # predict the noise residual noise_pred = self.unet( latent_model_input, t, encoder_hidden_states=prompt_embeds, cross_attention_kwargs=cross_attention_kwargs, ).sample # perform guidance if do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample if mask is not None: # masking if add_predicted_noise: init_latents_proper = self.scheduler.add_noise( init_latents_orig, noise_pred_uncond, torch.tensor([t]) ) else: init_latents_proper = self.scheduler.add_noise(init_latents_orig, noise, torch.tensor([t])) latents = (init_latents_proper * mask) + (latents * (1 - mask)) # call the callback, if provided if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if i % callback_steps == 0: if callback is not None: step_idx = i // getattr(self.scheduler, "order", 1) callback(step_idx, t, latents) if is_cancelled_callback is not None and is_cancelled_callback(): return None if output_type == "latent": image = latents has_nsfw_concept = None elif output_type == "pil": # 9. Post-processing image = self.decode_latents(latents) # 10. Run safety checker image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) # 11. Convert to PIL image = self.numpy_to_pil(image) else: # 9. Post-processing image = self.decode_latents(latents) # 10. Run safety checker image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) # Offload last model to CPU if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None: self.final_offload_hook.offload() if not return_dict: return image, has_nsfw_concept return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) def text2img( self, prompt: Union[str, List[str]], negative_prompt: Optional[Union[str, List[str]]] = None, height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.Tensor] = None, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, max_embeddings_multiples: Optional[int] = 3, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, is_cancelled_callback: Optional[Callable[[], bool]] = None, clip_skip=None, callback_steps: int = 1, cross_attention_kwargs: Optional[Dict[str, Any]] = None, ): r""" Function for text-to-image generation. Args: prompt (`str` or `List[str]`): The prompt or prompts to guide the image generation. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). 
height (`int`, *optional*, defaults to 512): The height in pixels of the generated image. width (`int`, *optional*, defaults to 512): The width in pixels of the generated image. num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. guidance_scale (`float`, *optional*, defaults to 7.5): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). `guidance_scale` is defined as `w` of equation 2. of [Imagen Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. eta (`float`, *optional*, defaults to 0.0): Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to [`schedulers.DDIMScheduler`], will be ignored for others. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor will ge generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. max_embeddings_multiples (`int`, *optional*, defaults to `3`): The max multiple length of prompt embeddings compared to the max output length of text encoder. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a plain tuple. callback (`Callable`, *optional*): A function that will be called every `callback_steps` steps during inference. The function will be called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. is_cancelled_callback (`Callable`, *optional*): A function that will be called every `callback_steps` steps during inference. If the function returns `True`, the inference will be cancelled. clip_skip (`int`, *optional*): Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that the output of the pre-final layer will be used for computing the prompt embeddings. callback_steps (`int`, *optional*, defaults to 1): The frequency at which the `callback` function will be called. If not specified, the callback will be called at every step. 
cross_attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under `self.processor` in [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). Returns: `None` if cancelled by `is_cancelled_callback`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple. When returning a tuple, the first element is a list with the generated images, and the second element is a list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work" (nsfw) content, according to the `safety_checker`. """ return self.__call__( prompt=prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, max_embeddings_multiples=max_embeddings_multiples, output_type=output_type, return_dict=return_dict, callback=callback, is_cancelled_callback=is_cancelled_callback, clip_skip=clip_skip, callback_steps=callback_steps, cross_attention_kwargs=cross_attention_kwargs, ) def img2img( self, image: Union[torch.Tensor, PIL.Image.Image], prompt: Union[str, List[str]], negative_prompt: Optional[Union[str, List[str]]] = None, strength: float = 0.8, num_inference_steps: Optional[int] = 50, guidance_scale: Optional[float] = 7.5, num_images_per_prompt: Optional[int] = 1, eta: Optional[float] = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, max_embeddings_multiples: Optional[int] = 3, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, is_cancelled_callback: Optional[Callable[[], bool]] = None, callback_steps: int = 1, cross_attention_kwargs: Optional[Dict[str, Any]] = None, ): r""" Function for image-to-image generation. Args: image (`torch.Tensor` or `PIL.Image.Image`): `Image`, or tensor representing an image batch, that will be used as the starting point for the process. prompt (`str` or `List[str]`): The prompt or prompts to guide the image generation. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). strength (`float`, *optional*, defaults to 0.8): Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1. `image` will be used as a starting point, adding more noise to it the larger the `strength`. The number of denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise will be maximum and the denoising process will run for the full number of iterations specified in `num_inference_steps`. A value of 1, therefore, essentially ignores `image`. num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. This parameter will be modulated by `strength`. 
guidance_scale (`float`, *optional*, defaults to 7.5): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). `guidance_scale` is defined as `w` of equation 2. of [Imagen Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. eta (`float`, *optional*, defaults to 0.0): Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to [`schedulers.DDIMScheduler`], will be ignored for others. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. max_embeddings_multiples (`int`, *optional*, defaults to `3`): The max multiple length of prompt embeddings compared to the max output length of text encoder. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a plain tuple. callback (`Callable`, *optional*): A function that will be called every `callback_steps` steps during inference. The function will be called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. is_cancelled_callback (`Callable`, *optional*): A function that will be called every `callback_steps` steps during inference. If the function returns `True`, the inference will be cancelled. callback_steps (`int`, *optional*, defaults to 1): The frequency at which the `callback` function will be called. If not specified, the callback will be called at every step. cross_attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under `self.processor` in [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). Returns: `None` if cancelled by `is_cancelled_callback`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple. When returning a tuple, the first element is a list with the generated images, and the second element is a list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work" (nsfw) content, according to the `safety_checker`. 
""" return self.__call__( prompt=prompt, negative_prompt=negative_prompt, image=image, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, strength=strength, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, max_embeddings_multiples=max_embeddings_multiples, output_type=output_type, return_dict=return_dict, callback=callback, is_cancelled_callback=is_cancelled_callback, callback_steps=callback_steps, cross_attention_kwargs=cross_attention_kwargs, ) def inpaint( self, image: Union[torch.Tensor, PIL.Image.Image], mask_image: Union[torch.Tensor, PIL.Image.Image], prompt: Union[str, List[str]], negative_prompt: Optional[Union[str, List[str]]] = None, strength: float = 0.8, num_inference_steps: Optional[int] = 50, guidance_scale: Optional[float] = 7.5, num_images_per_prompt: Optional[int] = 1, add_predicted_noise: Optional[bool] = False, eta: Optional[float] = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, max_embeddings_multiples: Optional[int] = 3, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, is_cancelled_callback: Optional[Callable[[], bool]] = None, callback_steps: int = 1, cross_attention_kwargs: Optional[Dict[str, Any]] = None, ): r""" Function for inpaint. Args: image (`torch.Tensor` or `PIL.Image.Image`): `Image`, or tensor representing an image batch, that will be used as the starting point for the process. This is the image whose masked region will be inpainted. mask_image (`torch.Tensor` or `PIL.Image.Image`): `Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be replaced by noise and therefore repainted, while black pixels will be preserved. If `mask_image` is a PIL image, it will be converted to a single channel (luminance) before use. If it's a tensor, it should contain one color channel (L) instead of 3, so the expected shape would be `(B, H, W, 1)`. prompt (`str` or `List[str]`): The prompt or prompts to guide the image generation. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). strength (`float`, *optional*, defaults to 0.8): Conceptually, indicates how much to inpaint the masked area. Must be between 0 and 1. When `strength` is 1, the denoising process will be run on the masked area for the full number of iterations specified in `num_inference_steps`. `image` will be used as a reference for the masked area, adding more noise to that region the larger the `strength`. If `strength` is 0, no inpainting will occur. num_inference_steps (`int`, *optional*, defaults to 50): The reference number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. This parameter will be modulated by `strength`, as explained above. guidance_scale (`float`, *optional*, defaults to 7.5): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). `guidance_scale` is defined as `w` of equation 2. of [Imagen Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > 1`. 
Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. add_predicted_noise (`bool`, *optional*, defaults to True): Use predicted noise instead of random noise when constructing noisy versions of the original image in the reverse diffusion process eta (`float`, *optional*, defaults to 0.0): Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to [`schedulers.DDIMScheduler`], will be ignored for others. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. max_embeddings_multiples (`int`, *optional*, defaults to `3`): The max multiple length of prompt embeddings compared to the max output length of text encoder. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a plain tuple. callback (`Callable`, *optional*): A function that will be called every `callback_steps` steps during inference. The function will be called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. is_cancelled_callback (`Callable`, *optional*): A function that will be called every `callback_steps` steps during inference. If the function returns `True`, the inference will be cancelled. callback_steps (`int`, *optional*, defaults to 1): The frequency at which the `callback` function will be called. If not specified, the callback will be called at every step. cross_attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under `self.processor` in [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). Returns: `None` if cancelled by `is_cancelled_callback`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple. When returning a tuple, the first element is a list with the generated images, and the second element is a list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work" (nsfw) content, according to the `safety_checker`. 
""" return self.__call__( prompt=prompt, negative_prompt=negative_prompt, image=image, mask_image=mask_image, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, strength=strength, num_images_per_prompt=num_images_per_prompt, add_predicted_noise=add_predicted_noise, eta=eta, generator=generator, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, max_embeddings_multiples=max_embeddings_multiples, output_type=output_type, return_dict=return_dict, callback=callback, is_cancelled_callback=is_cancelled_callback, callback_steps=callback_steps, cross_attention_kwargs=cross_attention_kwargs, )
diffusers/examples/community/lpw_stable_diffusion.py/0
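The `img2img` and `inpaint` entry points above are thin wrappers that forward to `__call__`. Below is a minimal usage sketch of this community pipeline; it assumes the file is loaded via `custom_pipeline="lpw_stable_diffusion"`, and the checkpoint id, image paths, and prompts are placeholders rather than anything taken from the file itself.

```py
# Hedged usage sketch (not part of the file above): load the long-prompt-weighting
# community pipeline and call its img2img / inpaint helpers.
import torch
from PIL import Image
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",            # placeholder checkpoint
    custom_pipeline="lpw_stable_diffusion",      # assumed community-pipeline name
    torch_dtype=torch.float16,
).to("cuda")

init_image = Image.open("input.png").convert("RGB")   # placeholder input image
mask_image = Image.open("mask.png").convert("L")      # placeholder mask (white = repaint)

# image-to-image: `strength` controls how much of the reference image survives
result = pipe.img2img(
    image=init_image,
    prompt="a (detailed:1.2) oil painting of a cat",   # weighted-prompt syntax supported by this pipeline
    negative_prompt="lowres, bad anatomy",
    strength=0.75,
    num_inference_steps=50,
).images[0]

# inpainting: white mask pixels are repainted, black pixels are preserved
inpainted = pipe.inpaint(
    image=init_image,
    mask_image=mask_image,
    prompt="a cat wearing a hat",
    strength=0.8,
).images[0]
```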
{ "file_path": "diffusers/examples/community/lpw_stable_diffusion.py", "repo_id": "diffusers", "token_count": 32302 }
import inspect import os import numpy as np import torch import torch.nn.functional as nnf from PIL import Image from torch.optim.adam import Adam from tqdm import tqdm from diffusers import StableDiffusionPipeline from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput def retrieve_timesteps( scheduler, num_inference_steps=None, device=None, timesteps=None, **kwargs, ): """ Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. Args: scheduler (`SchedulerMixin`): The scheduler to get timesteps from. num_inference_steps (`int`): The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` must be `None`. device (`str` or `torch.device`, *optional*): The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. timesteps (`List[int]`, *optional*): Custom timesteps used to support arbitrary spacing between timesteps. If `None`, then the default timestep spacing strategy of the scheduler is used. If `timesteps` is passed, `num_inference_steps` must be `None`. Returns: `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the second element is the number of inference steps. """ if timesteps is not None: accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accepts_timesteps: raise ValueError( f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" f" timestep schedules. Please check whether you are using the correct scheduler." ) scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) else: scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) timesteps = scheduler.timesteps return timesteps, num_inference_steps class NullTextPipeline(StableDiffusionPipeline): def get_noise_pred(self, latents, t, context): latents_input = torch.cat([latents] * 2) guidance_scale = 7.5 noise_pred = self.unet(latents_input, t, encoder_hidden_states=context)["sample"] noise_pred_uncond, noise_prediction_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_prediction_text - noise_pred_uncond) latents = self.prev_step(noise_pred, t, latents) return latents def get_noise_pred_single(self, latents, t, context): noise_pred = self.unet(latents, t, encoder_hidden_states=context)["sample"] return noise_pred @torch.no_grad() def image2latent(self, image_path): image = Image.open(image_path).convert("RGB") image = np.array(image) image = torch.from_numpy(image).float() / 127.5 - 1 image = image.permute(2, 0, 1).unsqueeze(0).to(self.device) latents = self.vae.encode(image)["latent_dist"].mean latents = latents * 0.18215 return latents @torch.no_grad() def latent2image(self, latents): latents = 1 / 0.18215 * latents.detach() image = self.vae.decode(latents)["sample"].detach() image = self.processor.postprocess(image, output_type="pil")[0] return image def prev_step(self, model_output, timestep, sample): prev_timestep = timestep - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps alpha_prod_t = self.scheduler.alphas_cumprod[timestep] alpha_prod_t_prev = ( self.scheduler.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.scheduler.final_alpha_cumprod ) beta_prod_t = 1 - alpha_prod_t 
pred_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5 pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * model_output prev_sample = alpha_prod_t_prev**0.5 * pred_original_sample + pred_sample_direction return prev_sample def next_step(self, model_output, timestep, sample): timestep, next_timestep = ( min(timestep - self.scheduler.config.num_train_timesteps // self.num_inference_steps, 999), timestep, ) alpha_prod_t = self.scheduler.alphas_cumprod[timestep] if timestep >= 0 else self.scheduler.final_alpha_cumprod alpha_prod_t_next = self.scheduler.alphas_cumprod[next_timestep] beta_prod_t = 1 - alpha_prod_t next_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5 next_sample_direction = (1 - alpha_prod_t_next) ** 0.5 * model_output next_sample = alpha_prod_t_next**0.5 * next_original_sample + next_sample_direction return next_sample def null_optimization(self, latents, context, num_inner_steps, epsilon): uncond_embeddings, cond_embeddings = context.chunk(2) uncond_embeddings_list = [] latent_cur = latents[-1] bar = tqdm(total=num_inner_steps * self.num_inference_steps) for i in range(self.num_inference_steps): uncond_embeddings = uncond_embeddings.clone().detach() uncond_embeddings.requires_grad = True optimizer = Adam([uncond_embeddings], lr=1e-2 * (1.0 - i / 100.0)) latent_prev = latents[len(latents) - i - 2] t = self.scheduler.timesteps[i] with torch.no_grad(): noise_pred_cond = self.get_noise_pred_single(latent_cur, t, cond_embeddings) for j in range(num_inner_steps): noise_pred_uncond = self.get_noise_pred_single(latent_cur, t, uncond_embeddings) noise_pred = noise_pred_uncond + 7.5 * (noise_pred_cond - noise_pred_uncond) latents_prev_rec = self.prev_step(noise_pred, t, latent_cur) loss = nnf.mse_loss(latents_prev_rec, latent_prev) optimizer.zero_grad() loss.backward() optimizer.step() loss_item = loss.item() bar.update() if loss_item < epsilon + i * 2e-5: break for j in range(j + 1, num_inner_steps): bar.update() uncond_embeddings_list.append(uncond_embeddings[:1].detach()) with torch.no_grad(): context = torch.cat([uncond_embeddings, cond_embeddings]) latent_cur = self.get_noise_pred(latent_cur, t, context) bar.close() return uncond_embeddings_list @torch.no_grad() def ddim_inversion_loop(self, latent, context): self.scheduler.set_timesteps(self.num_inference_steps) _, cond_embeddings = context.chunk(2) all_latent = [latent] latent = latent.clone().detach() with torch.no_grad(): for i in range(0, self.num_inference_steps): t = self.scheduler.timesteps[len(self.scheduler.timesteps) - i - 1] noise_pred = self.unet(latent, t, encoder_hidden_states=cond_embeddings)["sample"] latent = self.next_step(noise_pred, t, latent) all_latent.append(latent) return all_latent def get_context(self, prompt): uncond_input = self.tokenizer( [""], padding="max_length", max_length=self.tokenizer.model_max_length, return_tensors="pt" ) uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0] text_input = self.tokenizer( [prompt], padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_embeddings = self.text_encoder(text_input.input_ids.to(self.device))[0] context = torch.cat([uncond_embeddings, text_embeddings]) return context def invert( self, image_path: str, prompt: str, num_inner_steps=10, early_stop_epsilon=1e-6, num_inference_steps=50 ): self.num_inference_steps = num_inference_steps context = self.get_context(prompt) latent = self.image2latent(image_path) 
ddim_latents = self.ddim_inversion_loop(latent, context) if os.path.exists(image_path + ".pt"): uncond_embeddings = torch.load(image_path + ".pt") else: uncond_embeddings = self.null_optimization(ddim_latents, context, num_inner_steps, early_stop_epsilon) uncond_embeddings = torch.stack(uncond_embeddings, 0) torch.save(uncond_embeddings, image_path + ".pt") return ddim_latents[-1], uncond_embeddings @torch.no_grad() def __call__( self, prompt, uncond_embeddings, inverted_latent, num_inference_steps: int = 50, timesteps=None, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, generator=None, latents=None, prompt_embeds=None, negative_prompt_embeds=None, output_type="pil", ): self._guidance_scale = guidance_scale # 0. Default height and width to unet height = self.unet.config.sample_size * self.vae_scale_factor width = self.unet.config.sample_size * self.vae_scale_factor # to deal with lora scaling and other possible forward hook callback_steps = None # 1. Check inputs. Raise error if not correct self.check_inputs( prompt, height, width, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds, ) # 2. Define call parameter device = self._execution_device # 3. Encode input prompt prompt_embeds, _ = self.encode_prompt( prompt, device, num_images_per_prompt, self.do_classifier_free_guidance, negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, ) # 4. Prepare timesteps timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps) latents = inverted_latent with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): noise_pred_uncond = self.unet(latents, t, encoder_hidden_states=uncond_embeddings[i])["sample"] noise_pred = self.unet(latents, t, encoder_hidden_states=prompt_embeds)["sample"] noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0] progress_bar.update() if not output_type == "latent": image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False, generator=generator)[ 0 ] else: image = latents image = self.image_processor.postprocess( image, output_type=output_type, do_denormalize=[True] * image.shape[0] ) # Offload all models self.maybe_free_model_hooks() return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=False)
diffusers/examples/community/pipeline_null_text_inversion.py/0
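For reference, here is a hedged usage sketch of the null-text inversion pipeline defined above: `invert` runs DDIM inversion plus per-step null-text optimization on a source image, and `__call__` then regenerates the image from the inverted latent and the optimized unconditional embeddings. The checkpoint id, local image path, and the exact `custom_pipeline` string are assumptions for illustration only.

```py
# Hedged usage sketch (assumptions: checkpoint id, image path, custom_pipeline name).
import torch
from diffusers import DDIMScheduler, DiffusionPipeline

device = "cuda"
scheduler = DDIMScheduler(
    beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear",
    clip_sample=False, set_alpha_to_one=False,
)
pipeline = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",                 # placeholder checkpoint
    scheduler=scheduler,
    custom_pipeline="pipeline_null_text_inversion",   # assumed community-pipeline name
).to(device)

prompt = "a lying cat"
image_path = "./cat.png"                              # placeholder input image

# DDIM-invert the image and optimize the per-step null-text (unconditional) embeddings;
# the embeddings are saved next to the image as <image_path>.pt and reused on later runs.
inverted_latent, uncond_embeddings = pipeline.invert(
    image_path, prompt, num_inner_steps=10, early_stop_epsilon=1e-5, num_inference_steps=50
)

# Reconstruct (or edit, by changing the prompt) from the inverted latent.
image = pipeline(
    prompt, uncond_embeddings, inverted_latent, guidance_scale=7.5, num_inference_steps=50
).images[0]
image.save("reconstruction.png")
```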
{ "file_path": "diffusers/examples/community/pipeline_null_text_inversion.py", "repo_id": "diffusers", "token_count": 5423 }
import argparse import inspect import os import time import warnings from typing import Any, Callable, Dict, List, Optional, Union import numpy as np import PIL.Image import torch from PIL import Image from transformers import CLIPTokenizer from diffusers import OnnxRuntimeModel, StableDiffusionImg2ImgPipeline, UniPCMultistepScheduler from diffusers.image_processor import VaeImageProcessor from diffusers.pipelines.pipeline_utils import DiffusionPipeline from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from diffusers.schedulers import KarrasDiffusionSchedulers from diffusers.utils import ( deprecate, logging, replace_example_docstring, ) from diffusers.utils.torch_utils import randn_tensor logger = logging.get_logger(__name__) # pylint: disable=invalid-name EXAMPLE_DOC_STRING = """ Examples: ```py >>> # !pip install opencv-python transformers accelerate >>> from diffusers import StableDiffusionControlNetImg2ImgPipeline, ControlNetModel, UniPCMultistepScheduler >>> from diffusers.utils import load_image >>> import numpy as np >>> import torch >>> import cv2 >>> from PIL import Image >>> # download an image >>> image = load_image( ... "https://hf.co/datasets/huggingface/documentation-images/resolve/main/diffusers/input_image_vermeer.png" ... ) >>> np_image = np.array(image) >>> # get canny image >>> np_image = cv2.Canny(np_image, 100, 200) >>> np_image = np_image[:, :, None] >>> np_image = np.concatenate([np_image, np_image, np_image], axis=2) >>> canny_image = Image.fromarray(np_image) >>> # load control net and stable diffusion v1-5 >>> controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16) >>> pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained( ... "runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16 ... ) >>> # speed up diffusion process with faster scheduler and memory optimization >>> pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config) >>> pipe.enable_model_cpu_offload() >>> # generate image >>> generator = torch.manual_seed(0) >>> image = pipe( ... "futuristic-looking woman", ... num_inference_steps=20, ... generator=generator, ... image=image, ... control_image=canny_image, ... 
).images[0] ``` """ def prepare_image(image): if isinstance(image, torch.Tensor): # Batch single image if image.ndim == 3: image = image.unsqueeze(0) image = image.to(dtype=torch.float32) else: # preprocess image if isinstance(image, (PIL.Image.Image, np.ndarray)): image = [image] if isinstance(image, list) and isinstance(image[0], PIL.Image.Image): image = [np.array(i.convert("RGB"))[None, :] for i in image] image = np.concatenate(image, axis=0) elif isinstance(image, list) and isinstance(image[0], np.ndarray): image = np.concatenate([i[None, :] for i in image], axis=0) image = image.transpose(0, 3, 1, 2) image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0 return image class OnnxStableDiffusionControlNetImg2ImgPipeline(DiffusionPipeline): vae_encoder: OnnxRuntimeModel vae_decoder: OnnxRuntimeModel text_encoder: OnnxRuntimeModel tokenizer: CLIPTokenizer unet: OnnxRuntimeModel scheduler: KarrasDiffusionSchedulers def __init__( self, vae_encoder: OnnxRuntimeModel, vae_decoder: OnnxRuntimeModel, text_encoder: OnnxRuntimeModel, tokenizer: CLIPTokenizer, unet: OnnxRuntimeModel, scheduler: KarrasDiffusionSchedulers, ): super().__init__() self.register_modules( vae_encoder=vae_encoder, vae_decoder=vae_decoder, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, ) self.vae_scale_factor = 2 ** (4 - 1) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True) self.control_image_processor = VaeImageProcessor( vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False ) def _encode_prompt( self, prompt: Union[str, List[str]], num_images_per_prompt: Optional[int], do_classifier_free_guidance: bool, negative_prompt: Optional[str], prompt_embeds: Optional[np.ndarray] = None, negative_prompt_embeds: Optional[np.ndarray] = None, ): r""" Encodes the prompt into text encoder hidden states. Args: prompt (`str` or `List[str]`): prompt to be encoded num_images_per_prompt (`int`): number of images that should be generated per prompt do_classifier_free_guidance (`bool`): whether to use classifier free guidance or not negative_prompt (`str` or `List[str]`): The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). prompt_embeds (`np.ndarray`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`np.ndarray`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. 
""" if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: # get prompt text embeddings text_inputs = self.tokenizer( prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="np", ) text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding="max_length", return_tensors="np").input_ids if not np.array_equal(text_input_ids, untruncated_ids): removed_text = self.tokenizer.batch_decode( untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] ) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {self.tokenizer.model_max_length} tokens: {removed_text}" ) prompt_embeds = self.text_encoder(input_ids=text_input_ids.astype(np.int32))[0] prompt_embeds = np.repeat(prompt_embeds, num_images_per_prompt, axis=0) # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [""] * batch_size elif type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." ) elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] * batch_size elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) else: uncond_tokens = negative_prompt max_length = prompt_embeds.shape[1] uncond_input = self.tokenizer( uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="np", ) negative_prompt_embeds = self.text_encoder(input_ids=uncond_input.input_ids.astype(np.int32))[0] if do_classifier_free_guidance: negative_prompt_embeds = np.repeat(negative_prompt_embeds, num_images_per_prompt, axis=0) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes prompt_embeds = np.concatenate([negative_prompt_embeds, prompt_embeds]) return prompt_embeds # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents def decode_latents(self, latents): warnings.warn( "The decode_latents method is deprecated and will be removed in a future version. Please" " use VaeImageProcessor instead", FutureWarning, ) latents = 1 / self.vae.config.scaling_factor * latents image = self.vae.decode(latents, return_dict=False)[0] image = (image / 2 + 0.5).clamp(0, 1) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 image = image.cpu().permute(0, 2, 3, 1).float().numpy() return image # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs def prepare_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs def check_inputs( self, num_controlnet, prompt, image, callback_steps, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None, controlnet_conditioning_scale=1.0, control_guidance_start=0.0, control_guidance_end=1.0, ): if (callback_steps is None) or ( callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) ): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." ) if prompt is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt is None and prompt_embeds is None: raise ValueError( "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." ) elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" f" {negative_prompt_embeds}. Please make sure to only forward one of the two." ) if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError( "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" f" {negative_prompt_embeds.shape}." ) # Check `image` if num_controlnet == 1: self.check_image(image, prompt, prompt_embeds) elif num_controlnet > 1: if not isinstance(image, list): raise TypeError("For multiple controlnets: `image` must be type `list`") # When `image` is a nested list: # (e.g. [[canny_image_1, pose_image_1], [canny_image_2, pose_image_2]]) elif any(isinstance(i, list) for i in image): raise ValueError("A single batch of multiple conditionings are supported at the moment.") elif len(image) != num_controlnet: raise ValueError( f"For multiple controlnets: `image` must have the same length as the number of controlnets, but got {len(image)} images and {num_controlnet} ControlNets." 
) for image_ in image: self.check_image(image_, prompt, prompt_embeds) else: assert False # Check `controlnet_conditioning_scale` if num_controlnet == 1: if not isinstance(controlnet_conditioning_scale, float): raise TypeError("For single controlnet: `controlnet_conditioning_scale` must be type `float`.") elif num_controlnet > 1: if isinstance(controlnet_conditioning_scale, list): if any(isinstance(i, list) for i in controlnet_conditioning_scale): raise ValueError("A single batch of multiple conditionings are supported at the moment.") elif ( isinstance(controlnet_conditioning_scale, list) and len(controlnet_conditioning_scale) != num_controlnet ): raise ValueError( "For multiple controlnets: When `controlnet_conditioning_scale` is specified as `list`, it must have" " the same length as the number of controlnets" ) else: assert False if len(control_guidance_start) != len(control_guidance_end): raise ValueError( f"`control_guidance_start` has {len(control_guidance_start)} elements, but `control_guidance_end` has {len(control_guidance_end)} elements. Make sure to provide the same number of elements to each list." ) if num_controlnet > 1: if len(control_guidance_start) != num_controlnet: raise ValueError( f"`control_guidance_start`: {control_guidance_start} has {len(control_guidance_start)} elements but there are {num_controlnet} controlnets available. Make sure to provide {num_controlnet}." ) for start, end in zip(control_guidance_start, control_guidance_end): if start >= end: raise ValueError( f"control guidance start: {start} cannot be larger or equal to control guidance end: {end}." ) if start < 0.0: raise ValueError(f"control guidance start: {start} can't be smaller than 0.") if end > 1.0: raise ValueError(f"control guidance end: {end} can't be larger than 1.0.") # Copied from diffusers.pipelines.controlnet.pipeline_controlnet.StableDiffusionControlNetPipeline.check_image def check_image(self, image, prompt, prompt_embeds): image_is_pil = isinstance(image, PIL.Image.Image) image_is_tensor = isinstance(image, torch.Tensor) image_is_np = isinstance(image, np.ndarray) image_is_pil_list = isinstance(image, list) and isinstance(image[0], PIL.Image.Image) image_is_tensor_list = isinstance(image, list) and isinstance(image[0], torch.Tensor) image_is_np_list = isinstance(image, list) and isinstance(image[0], np.ndarray) if ( not image_is_pil and not image_is_tensor and not image_is_np and not image_is_pil_list and not image_is_tensor_list and not image_is_np_list ): raise TypeError( f"image must be passed and be one of PIL image, numpy array, torch tensor, list of PIL images, list of numpy arrays or list of torch tensors, but is {type(image)}" ) if image_is_pil: image_batch_size = 1 else: image_batch_size = len(image) if prompt is not None and isinstance(prompt, str): prompt_batch_size = 1 elif prompt is not None and isinstance(prompt, list): prompt_batch_size = len(prompt) elif prompt_embeds is not None: prompt_batch_size = prompt_embeds.shape[0] if image_batch_size != 1 and image_batch_size != prompt_batch_size: raise ValueError( f"If image batch size is not 1, image batch size must be same as prompt batch size. 
image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}" ) # Copied from diffusers.pipelines.controlnet.pipeline_controlnet.StableDiffusionControlNetPipeline.prepare_image def prepare_control_image( self, image, width, height, batch_size, num_images_per_prompt, device, dtype, do_classifier_free_guidance=False, guess_mode=False, ): image = self.control_image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32) image_batch_size = image.shape[0] if image_batch_size == 1: repeat_by = batch_size else: # image batch size is the same as prompt batch size repeat_by = num_images_per_prompt image = image.repeat_interleave(repeat_by, dim=0) image = image.to(device=device, dtype=dtype) if do_classifier_free_guidance and not guess_mode: image = torch.cat([image] * 2) return image # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline.get_timesteps def get_timesteps(self, num_inference_steps, strength, device): # get the original timestep using init_timestep init_timestep = min(int(num_inference_steps * strength), num_inference_steps) t_start = max(num_inference_steps - init_timestep, 0) timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :] return timesteps, num_inference_steps - t_start def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None): if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)): raise ValueError( f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}" ) image = image.to(device=device, dtype=dtype) batch_size = batch_size * num_images_per_prompt if image.shape[1] == 4: init_latents = image else: _image = image.cpu().detach().numpy() init_latents = self.vae_encoder(sample=_image)[0] init_latents = torch.from_numpy(init_latents).to(device=device, dtype=dtype) init_latents = 0.18215 * init_latents if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0: # expand init_latents for batch_size deprecation_message = ( f"You have passed {batch_size} text prompts (`prompt`), but only {init_latents.shape[0]} initial" " images (`image`). Initial images are now duplicating to match the number of text prompts. Note" " that this behavior is deprecated and will be removed in a version 1.0.0. Please make sure to update" " your script to pass as many initial images as text prompts to suppress this warning." ) deprecate("len(prompt) != len(image)", "1.0.0", deprecation_message, standard_warn=False) additional_image_per_prompt = batch_size // init_latents.shape[0] init_latents = torch.cat([init_latents] * additional_image_per_prompt, dim=0) elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0: raise ValueError( f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts." 
) else: init_latents = torch.cat([init_latents], dim=0) shape = init_latents.shape noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) # get latents init_latents = self.scheduler.add_noise(init_latents, noise, timestep) latents = init_latents return latents @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__( self, num_controlnet: int, fp16: bool = True, prompt: Union[str, List[str]] = None, image: Union[ torch.Tensor, PIL.Image.Image, np.ndarray, List[torch.Tensor], List[PIL.Image.Image], List[np.ndarray], ] = None, control_image: Union[ torch.Tensor, PIL.Image.Image, np.ndarray, List[torch.Tensor], List[PIL.Image.Image], List[np.ndarray], ] = None, height: Optional[int] = None, width: Optional[int] = None, strength: float = 0.8, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.Tensor] = None, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, callback_steps: int = 1, cross_attention_kwargs: Optional[Dict[str, Any]] = None, controlnet_conditioning_scale: Union[float, List[float]] = 0.8, guess_mode: bool = False, control_guidance_start: Union[float, List[float]] = 0.0, control_guidance_end: Union[float, List[float]] = 1.0, ): r""" Function invoked when calling the pipeline for generation. Args: prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. instead. image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,: `List[List[torch.Tensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`): The initial image will be used as the starting point for the image generation process. Can also accept image latents as `image`, if passing latents directly, it will not be encoded again. control_image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,: `List[List[torch.Tensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`): The ControlNet input condition. ControlNet uses this input condition to generate guidance to Unet. If the type is specified as `torch.Tensor`, it is passed to ControlNet as is. `PIL.Image.Image` can also be accepted as an image. The dimensions of the output image defaults to `image`'s dimensions. If height and/or width are passed, `image` is resized according to them. If multiple ControlNets are specified in init, images must be passed as a list such that each element of the list can be correctly batched for input to a single controlnet. height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): The height in pixels of the generated image. width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): The width in pixels of the generated image. num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. 
guidance_scale (`float`, *optional*, defaults to 7.5): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). `guidance_scale` is defined as `w` of equation 2. of [Imagen Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. eta (`float`, *optional*, defaults to 0.0): Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to [`schedulers.DDIMScheduler`], will be ignored for others. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor will ge generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a plain tuple. callback (`Callable`, *optional*): A function that will be called every `callback_steps` steps during inference. The function will be called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. callback_steps (`int`, *optional*, defaults to 1): The frequency at which the `callback` function will be called. If not specified, the callback will be called at every step. cross_attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under `self.processor` in [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0): The outputs of the controlnet are multiplied by `controlnet_conditioning_scale` before they are added to the residual in the original unet. If multiple ControlNets are specified in init, you can set the corresponding scale as a list. Note that by default, we use a smaller conditioning scale for inpainting than for [`~StableDiffusionControlNetPipeline.__call__`]. 
guess_mode (`bool`, *optional*, defaults to `False`): In this mode, the ControlNet encoder will try best to recognize the content of the input image even if you remove all prompts. The `guidance_scale` between 3.0 and 5.0 is recommended. control_guidance_start (`float` or `List[float]`, *optional*, defaults to 0.0): The percentage of total steps at which the controlnet starts applying. control_guidance_end (`float` or `List[float]`, *optional*, defaults to 1.0): The percentage of total steps at which the controlnet stops applying. Examples: Returns: [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple. When returning a tuple, the first element is a list with the generated images, and the second element is a list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work" (nsfw) content, according to the `safety_checker`. """ if fp16: torch_dtype = torch.float16 np_dtype = np.float16 else: torch_dtype = torch.float32 np_dtype = np.float32 # align format for control guidance if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list): control_guidance_start = len(control_guidance_end) * [control_guidance_start] elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list): control_guidance_end = len(control_guidance_start) * [control_guidance_end] elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list): mult = num_controlnet control_guidance_start, control_guidance_end = ( mult * [control_guidance_start], mult * [control_guidance_end], ) # 1. Check inputs. Raise error if not correct self.check_inputs( num_controlnet, prompt, control_image, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds, controlnet_conditioning_scale, control_guidance_start, control_guidance_end, ) # 2. Define call parameters if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. do_classifier_free_guidance = guidance_scale > 1.0 if num_controlnet > 1 and isinstance(controlnet_conditioning_scale, float): controlnet_conditioning_scale = [controlnet_conditioning_scale] * num_controlnet # 3. Encode input prompt prompt_embeds = self._encode_prompt( prompt, num_images_per_prompt, do_classifier_free_guidance, negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, ) # 4. Prepare image image = self.image_processor.preprocess(image).to(dtype=torch.float32) # 5. 
Prepare controlnet_conditioning_image if num_controlnet == 1: control_image = self.prepare_control_image( image=control_image, width=width, height=height, batch_size=batch_size * num_images_per_prompt, num_images_per_prompt=num_images_per_prompt, device=device, dtype=torch_dtype, do_classifier_free_guidance=do_classifier_free_guidance, guess_mode=guess_mode, ) elif num_controlnet > 1: control_images = [] for control_image_ in control_image: control_image_ = self.prepare_control_image( image=control_image_, width=width, height=height, batch_size=batch_size * num_images_per_prompt, num_images_per_prompt=num_images_per_prompt, device=device, dtype=torch_dtype, do_classifier_free_guidance=do_classifier_free_guidance, guess_mode=guess_mode, ) control_images.append(control_image_) control_image = control_images else: assert False # 5. Prepare timesteps self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device) latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) # 6. Prepare latent variables latents = self.prepare_latents( image, latent_timestep, batch_size, num_images_per_prompt, torch_dtype, device, generator, ) # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) # 7.1 Create tensor stating which controlnets to keep controlnet_keep = [] for i in range(len(timesteps)): keeps = [ 1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e) for s, e in zip(control_guidance_start, control_guidance_end) ] controlnet_keep.append(keeps[0] if num_controlnet == 1 else keeps) # 8. Denoising loop num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) if isinstance(controlnet_keep[i], list): cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])] else: controlnet_cond_scale = controlnet_conditioning_scale if isinstance(controlnet_cond_scale, list): controlnet_cond_scale = controlnet_cond_scale[0] cond_scale = controlnet_cond_scale * controlnet_keep[i] # predict the noise residual _latent_model_input = latent_model_input.cpu().detach().numpy() _prompt_embeds = np.array(prompt_embeds, dtype=np_dtype) _t = np.array([t.cpu().detach().numpy()], dtype=np_dtype) if num_controlnet == 1: control_images = np.array([control_image], dtype=np_dtype) else: control_images = [] for _control_img in control_image: _control_img = _control_img.cpu().detach().numpy() control_images.append(_control_img) control_images = np.array(control_images, dtype=np_dtype) control_scales = np.array(cond_scale, dtype=np_dtype) control_scales = np.resize(control_scales, (num_controlnet, 1)) noise_pred = self.unet( sample=_latent_model_input, timestep=_t, encoder_hidden_states=_prompt_embeds, controlnet_conds=control_images, conditioning_scales=control_scales, )[0] noise_pred = torch.from_numpy(noise_pred).to(device) # perform guidance if do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous 
noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] # call the callback, if provided if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, "order", 1) callback(step_idx, t, latents) if not output_type == "latent": _latents = latents.cpu().detach().numpy() / 0.18215 _latents = np.array(_latents, dtype=np_dtype) image = self.vae_decoder(latent_sample=_latents)[0] image = torch.from_numpy(image).to(device, dtype=torch.float32) has_nsfw_concept = None else: image = latents has_nsfw_concept = None if has_nsfw_concept is None: do_denormalize = [True] * image.shape[0] else: do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) if not return_dict: return (image, has_nsfw_concept) return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--sd_model", type=str, required=True, help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).", ) parser.add_argument( "--onnx_model_dir", type=str, required=True, help="Path to the ONNX directory", ) parser.add_argument("--qr_img_path", type=str, required=True, help="Path to the qr code image") args = parser.parse_args() qr_image = Image.open(args.qr_img_path) qr_image = qr_image.resize((512, 512)) # init stable diffusion pipeline pipeline = StableDiffusionImg2ImgPipeline.from_pretrained(args.sd_model) pipeline.scheduler = UniPCMultistepScheduler.from_config(pipeline.scheduler.config) provider = ["CUDAExecutionProvider", "CPUExecutionProvider"] onnx_pipeline = OnnxStableDiffusionControlNetImg2ImgPipeline( vae_encoder=OnnxRuntimeModel.from_pretrained( os.path.join(args.onnx_model_dir, "vae_encoder"), provider=provider ), vae_decoder=OnnxRuntimeModel.from_pretrained( os.path.join(args.onnx_model_dir, "vae_decoder"), provider=provider ), text_encoder=OnnxRuntimeModel.from_pretrained( os.path.join(args.onnx_model_dir, "text_encoder"), provider=provider ), tokenizer=pipeline.tokenizer, unet=OnnxRuntimeModel.from_pretrained(os.path.join(args.onnx_model_dir, "unet"), provider=provider), scheduler=pipeline.scheduler, ) onnx_pipeline = onnx_pipeline.to("cuda") prompt = "a cute cat fly to the moon" negative_prompt = "paintings, sketches, worst quality, low quality, normal quality, lowres, normal quality, monochrome, grayscale, skin spots, acnes, skin blemishes, age spot, glans, nsfw, nipples, necklace, worst quality, low quality, watermark, username, signature, multiple breasts, lowres, bad anatomy, bad hands, error, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry, bad feet, single color, ugly, duplicate, morbid, mutilated, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, ugly, blurry, bad anatomy, bad proportions, extra limbs, disfigured, bad anatomy, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, mutated hands, fused fingers, too many fingers, long neck, bad body perspect" for i in range(10): start_time = time.time() image = onnx_pipeline( num_controlnet=2, prompt=prompt, 
negative_prompt=negative_prompt, image=qr_image, control_image=[qr_image, qr_image], width=512, height=512, strength=0.75, num_inference_steps=20, num_images_per_prompt=1, controlnet_conditioning_scale=[0.8, 0.8], control_guidance_start=[0.3, 0.3], control_guidance_end=[0.9, 0.9], ).images[0] print(time.time() - start_time) image.save("output_qr_code.png")
diffusers/examples/community/run_onnx_controlnet.py/0
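One detail of the pipeline above that is easy to misread is the `controlnet_keep` schedule built from `control_guidance_start` and `control_guidance_end`. The sketch below reproduces just that computation in isolation so the windowing can be checked numerically; the function name is ours, not part of the file.

```py
# Standalone sketch of the control-guidance window logic used in the pipeline above.
# A ControlNet is "kept" at step i only while its [start, end] window covers the
# normalized step position; outside the window its conditioning scale becomes 0.
def controlnet_keep_schedule(num_steps, starts, ends):
    keep = []
    for i in range(num_steps):
        keeps = [
            1.0 - float(i / num_steps < s or (i + 1) / num_steps > e)
            for s, e in zip(starts, ends)
        ]
        keep.append(keeps[0] if len(starts) == 1 else keeps)
    return keep

# With the 20 steps and the [0.3, 0.9] windows used in the __main__ demo above,
# both ControlNets are active only for steps 6..17: the first 6 and last 2 steps skip them.
print(controlnet_keep_schedule(20, [0.3, 0.3], [0.9, 0.9]))
```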
{ "file_path": "diffusers/examples/community/run_onnx_controlnet.py", "repo_id": "diffusers", "token_count": 19726 }
# # Copyright 2024 The HuggingFace Inc. team. # SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import os from collections import OrderedDict from typing import List, Optional, Tuple, Union import numpy as np import onnx import onnx_graphsurgeon as gs import PIL.Image import tensorrt as trt import torch from cuda import cudart from huggingface_hub import snapshot_download from huggingface_hub.utils import validate_hf_hub_args from onnx import shape_inference from packaging import version from polygraphy import cuda from polygraphy.backend.common import bytes_from_path from polygraphy.backend.onnx.loader import fold_constants from polygraphy.backend.trt import ( CreateConfig, Profile, engine_from_bytes, engine_from_network, network_from_onnx_path, save_engine, ) from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection from diffusers import DiffusionPipeline from diffusers.configuration_utils import FrozenDict, deprecate from diffusers.image_processor import VaeImageProcessor from diffusers.models import AutoencoderKL, UNet2DConditionModel from diffusers.pipelines.stable_diffusion import ( StableDiffusionPipelineOutput, StableDiffusionSafetyChecker, ) from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img import retrieve_latents from diffusers.schedulers import DDIMScheduler from diffusers.utils import logging """ Installation instructions python3 -m pip install --upgrade transformers diffusers>=0.16.0 python3 -m pip install --upgrade tensorrt~=10.2.0 python3 -m pip install --upgrade polygraphy>=0.47.0 onnx-graphsurgeon --extra-index-url https://pypi.ngc.nvidia.com python3 -m pip install onnxruntime """ TRT_LOGGER = trt.Logger(trt.Logger.ERROR) logger = logging.get_logger(__name__) # pylint: disable=invalid-name # Map of numpy dtype -> torch dtype numpy_to_torch_dtype_dict = { np.uint8: torch.uint8, np.int8: torch.int8, np.int16: torch.int16, np.int32: torch.int32, np.int64: torch.int64, np.float16: torch.float16, np.float32: torch.float32, np.float64: torch.float64, np.complex64: torch.complex64, np.complex128: torch.complex128, } if np.version.full_version >= "1.24.0": numpy_to_torch_dtype_dict[np.bool_] = torch.bool else: numpy_to_torch_dtype_dict[np.bool] = torch.bool # Map of torch dtype -> numpy dtype torch_to_numpy_dtype_dict = {value: key for (key, value) in numpy_to_torch_dtype_dict.items()} def preprocess_image(image): """ image: torch.Tensor """ w, h = image.size w, h = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32 image = image.resize((w, h)) image = np.array(image).astype(np.float32) / 255.0 image = image[None].transpose(0, 3, 1, 2) image = torch.from_numpy(image).contiguous() return 2.0 * image - 1.0 class Engine: def __init__(self, engine_path): self.engine_path = engine_path self.engine = None self.context = None self.buffers = OrderedDict() self.tensors = 
OrderedDict() def __del__(self): [buf.free() for buf in self.buffers.values() if isinstance(buf, cuda.DeviceArray)] del self.engine del self.context del self.buffers del self.tensors def build( self, onnx_path, fp16, input_profile=None, enable_all_tactics=False, timing_cache=None, ): logger.warning(f"Building TensorRT engine for {onnx_path}: {self.engine_path}") p = Profile() if input_profile: for name, dims in input_profile.items(): assert len(dims) == 3 p.add(name, min=dims[0], opt=dims[1], max=dims[2]) extra_build_args = {} if not enable_all_tactics: extra_build_args["tactic_sources"] = [] engine = engine_from_network( network_from_onnx_path(onnx_path, flags=[trt.OnnxParserFlag.NATIVE_INSTANCENORM]), config=CreateConfig(fp16=fp16, profiles=[p], load_timing_cache=timing_cache, **extra_build_args), save_timing_cache=timing_cache, ) save_engine(engine, path=self.engine_path) def load(self): logger.warning(f"Loading TensorRT engine: {self.engine_path}") self.engine = engine_from_bytes(bytes_from_path(self.engine_path)) def activate(self): self.context = self.engine.create_execution_context() def allocate_buffers(self, shape_dict=None, device="cuda"): for binding in range(self.engine.num_io_tensors): name = self.engine.get_tensor_name(binding) if shape_dict and name in shape_dict: shape = shape_dict[name] else: shape = self.engine.get_tensor_shape(name) dtype = trt.nptype(self.engine.get_tensor_dtype(name)) if self.engine.get_tensor_mode(name) == trt.TensorIOMode.INPUT: self.context.set_input_shape(name, shape) tensor = torch.empty(tuple(shape), dtype=numpy_to_torch_dtype_dict[dtype]).to(device=device) self.tensors[name] = tensor def infer(self, feed_dict, stream): for name, buf in feed_dict.items(): self.tensors[name].copy_(buf) for name, tensor in self.tensors.items(): self.context.set_tensor_address(name, tensor.data_ptr()) noerror = self.context.execute_async_v3(stream) if not noerror: raise ValueError("ERROR: inference failed.") return self.tensors class Optimizer: def __init__(self, onnx_graph): self.graph = gs.import_onnx(onnx_graph) def cleanup(self, return_onnx=False): self.graph.cleanup().toposort() if return_onnx: return gs.export_onnx(self.graph) def select_outputs(self, keep, names=None): self.graph.outputs = [self.graph.outputs[o] for o in keep] if names: for i, name in enumerate(names): self.graph.outputs[i].name = name def fold_constants(self, return_onnx=False): onnx_graph = fold_constants(gs.export_onnx(self.graph), allow_onnxruntime_shape_inference=True) self.graph = gs.import_onnx(onnx_graph) if return_onnx: return onnx_graph def infer_shapes(self, return_onnx=False): onnx_graph = gs.export_onnx(self.graph) if onnx_graph.ByteSize() > 2147483648: raise TypeError("ERROR: model size exceeds supported 2GB limit") else: onnx_graph = shape_inference.infer_shapes(onnx_graph) self.graph = gs.import_onnx(onnx_graph) if return_onnx: return onnx_graph class BaseModel: def __init__(self, model, fp16=False, device="cuda", max_batch_size=16, embedding_dim=768, text_maxlen=77): self.model = model self.name = "SD Model" self.fp16 = fp16 self.device = device self.min_batch = 1 self.max_batch = max_batch_size self.min_image_shape = 256 # min image resolution: 256x256 self.max_image_shape = 1024 # max image resolution: 1024x1024 self.min_latent_shape = self.min_image_shape // 8 self.max_latent_shape = self.max_image_shape // 8 self.embedding_dim = embedding_dim self.text_maxlen = text_maxlen def get_model(self): return self.model def get_input_names(self): pass def get_output_names(self): 
pass def get_dynamic_axes(self): return None def get_sample_input(self, batch_size, image_height, image_width): pass def get_input_profile(self, batch_size, image_height, image_width, static_batch, static_shape): return None def get_shape_dict(self, batch_size, image_height, image_width): return None def optimize(self, onnx_graph): opt = Optimizer(onnx_graph) opt.cleanup() opt.fold_constants() opt.infer_shapes() onnx_opt_graph = opt.cleanup(return_onnx=True) return onnx_opt_graph def check_dims(self, batch_size, image_height, image_width): assert batch_size >= self.min_batch and batch_size <= self.max_batch assert image_height % 8 == 0 or image_width % 8 == 0 latent_height = image_height // 8 latent_width = image_width // 8 assert latent_height >= self.min_latent_shape and latent_height <= self.max_latent_shape assert latent_width >= self.min_latent_shape and latent_width <= self.max_latent_shape return (latent_height, latent_width) def get_minmax_dims(self, batch_size, image_height, image_width, static_batch, static_shape): min_batch = batch_size if static_batch else self.min_batch max_batch = batch_size if static_batch else self.max_batch latent_height = image_height // 8 latent_width = image_width // 8 min_image_height = image_height if static_shape else self.min_image_shape max_image_height = image_height if static_shape else self.max_image_shape min_image_width = image_width if static_shape else self.min_image_shape max_image_width = image_width if static_shape else self.max_image_shape min_latent_height = latent_height if static_shape else self.min_latent_shape max_latent_height = latent_height if static_shape else self.max_latent_shape min_latent_width = latent_width if static_shape else self.min_latent_shape max_latent_width = latent_width if static_shape else self.max_latent_shape return ( min_batch, max_batch, min_image_height, max_image_height, min_image_width, max_image_width, min_latent_height, max_latent_height, min_latent_width, max_latent_width, ) def getOnnxPath(model_name, onnx_dir, opt=True): return os.path.join(onnx_dir, model_name + (".opt" if opt else "") + ".onnx") def getEnginePath(model_name, engine_dir): return os.path.join(engine_dir, model_name + ".plan") def build_engines( models: dict, engine_dir, onnx_dir, onnx_opset, opt_image_height, opt_image_width, opt_batch_size=1, force_engine_rebuild=False, static_batch=False, static_shape=True, enable_all_tactics=False, timing_cache=None, ): built_engines = {} if not os.path.isdir(onnx_dir): os.makedirs(onnx_dir) if not os.path.isdir(engine_dir): os.makedirs(engine_dir) # Export models to ONNX for model_name, model_obj in models.items(): engine_path = getEnginePath(model_name, engine_dir) if force_engine_rebuild or not os.path.exists(engine_path): logger.warning("Building Engines...") logger.warning("Engine build can take a while to complete") onnx_path = getOnnxPath(model_name, onnx_dir, opt=False) onnx_opt_path = getOnnxPath(model_name, onnx_dir) if force_engine_rebuild or not os.path.exists(onnx_opt_path): if force_engine_rebuild or not os.path.exists(onnx_path): logger.warning(f"Exporting model: {onnx_path}") model = model_obj.get_model() with torch.inference_mode(), torch.autocast("cuda"): inputs = model_obj.get_sample_input(opt_batch_size, opt_image_height, opt_image_width) torch.onnx.export( model, inputs, onnx_path, export_params=True, opset_version=onnx_opset, do_constant_folding=True, input_names=model_obj.get_input_names(), output_names=model_obj.get_output_names(), 
dynamic_axes=model_obj.get_dynamic_axes(), ) del model torch.cuda.empty_cache() gc.collect() else: logger.warning(f"Found cached model: {onnx_path}") # Optimize onnx if force_engine_rebuild or not os.path.exists(onnx_opt_path): logger.warning(f"Generating optimizing model: {onnx_opt_path}") onnx_opt_graph = model_obj.optimize(onnx.load(onnx_path)) onnx.save(onnx_opt_graph, onnx_opt_path) else: logger.warning(f"Found cached optimized model: {onnx_opt_path} ") # Build TensorRT engines for model_name, model_obj in models.items(): engine_path = getEnginePath(model_name, engine_dir) engine = Engine(engine_path) onnx_path = getOnnxPath(model_name, onnx_dir, opt=False) onnx_opt_path = getOnnxPath(model_name, onnx_dir) if force_engine_rebuild or not os.path.exists(engine.engine_path): engine.build( onnx_opt_path, fp16=True, input_profile=model_obj.get_input_profile( opt_batch_size, opt_image_height, opt_image_width, static_batch=static_batch, static_shape=static_shape, ), timing_cache=timing_cache, ) built_engines[model_name] = engine # Load and activate TensorRT engines for model_name, model_obj in models.items(): engine = built_engines[model_name] engine.load() engine.activate() return built_engines def runEngine(engine, feed_dict, stream): return engine.infer(feed_dict, stream) class CLIP(BaseModel): def __init__(self, model, device, max_batch_size, embedding_dim): super(CLIP, self).__init__( model=model, device=device, max_batch_size=max_batch_size, embedding_dim=embedding_dim ) self.name = "CLIP" def get_input_names(self): return ["input_ids"] def get_output_names(self): return ["text_embeddings", "pooler_output"] def get_dynamic_axes(self): return {"input_ids": {0: "B"}, "text_embeddings": {0: "B"}} def get_input_profile(self, batch_size, image_height, image_width, static_batch, static_shape): self.check_dims(batch_size, image_height, image_width) min_batch, max_batch, _, _, _, _, _, _, _, _ = self.get_minmax_dims( batch_size, image_height, image_width, static_batch, static_shape ) return { "input_ids": [(min_batch, self.text_maxlen), (batch_size, self.text_maxlen), (max_batch, self.text_maxlen)] } def get_shape_dict(self, batch_size, image_height, image_width): self.check_dims(batch_size, image_height, image_width) return { "input_ids": (batch_size, self.text_maxlen), "text_embeddings": (batch_size, self.text_maxlen, self.embedding_dim), } def get_sample_input(self, batch_size, image_height, image_width): self.check_dims(batch_size, image_height, image_width) return torch.zeros(batch_size, self.text_maxlen, dtype=torch.int32, device=self.device) def optimize(self, onnx_graph): opt = Optimizer(onnx_graph) opt.select_outputs([0]) # delete graph output#1 opt.cleanup() opt.fold_constants() opt.infer_shapes() opt.select_outputs([0], names=["text_embeddings"]) # rename network output opt_onnx_graph = opt.cleanup(return_onnx=True) return opt_onnx_graph def make_CLIP(model, device, max_batch_size, embedding_dim, inpaint=False): return CLIP(model, device=device, max_batch_size=max_batch_size, embedding_dim=embedding_dim) class UNet(BaseModel): def __init__( self, model, fp16=False, device="cuda", max_batch_size=16, embedding_dim=768, text_maxlen=77, unet_dim=4 ): super(UNet, self).__init__( model=model, fp16=fp16, device=device, max_batch_size=max_batch_size, embedding_dim=embedding_dim, text_maxlen=text_maxlen, ) self.unet_dim = unet_dim self.name = "UNet" def get_input_names(self): return ["sample", "timestep", "encoder_hidden_states"] def get_output_names(self): return ["latent"] def 
get_dynamic_axes(self): return { "sample": {0: "2B", 2: "H", 3: "W"}, "encoder_hidden_states": {0: "2B"}, "latent": {0: "2B", 2: "H", 3: "W"}, } def get_input_profile(self, batch_size, image_height, image_width, static_batch, static_shape): latent_height, latent_width = self.check_dims(batch_size, image_height, image_width) ( min_batch, max_batch, _, _, _, _, min_latent_height, max_latent_height, min_latent_width, max_latent_width, ) = self.get_minmax_dims(batch_size, image_height, image_width, static_batch, static_shape) return { "sample": [ (2 * min_batch, self.unet_dim, min_latent_height, min_latent_width), (2 * batch_size, self.unet_dim, latent_height, latent_width), (2 * max_batch, self.unet_dim, max_latent_height, max_latent_width), ], "encoder_hidden_states": [ (2 * min_batch, self.text_maxlen, self.embedding_dim), (2 * batch_size, self.text_maxlen, self.embedding_dim), (2 * max_batch, self.text_maxlen, self.embedding_dim), ], } def get_shape_dict(self, batch_size, image_height, image_width): latent_height, latent_width = self.check_dims(batch_size, image_height, image_width) return { "sample": (2 * batch_size, self.unet_dim, latent_height, latent_width), "encoder_hidden_states": (2 * batch_size, self.text_maxlen, self.embedding_dim), "latent": (2 * batch_size, 4, latent_height, latent_width), } def get_sample_input(self, batch_size, image_height, image_width): latent_height, latent_width = self.check_dims(batch_size, image_height, image_width) dtype = torch.float16 if self.fp16 else torch.float32 return ( torch.randn( 2 * batch_size, self.unet_dim, latent_height, latent_width, dtype=torch.float32, device=self.device ), torch.tensor([1.0], dtype=torch.float32, device=self.device), torch.randn(2 * batch_size, self.text_maxlen, self.embedding_dim, dtype=dtype, device=self.device), ) def make_UNet(model, device, max_batch_size, embedding_dim, inpaint=False): return UNet( model, fp16=True, device=device, max_batch_size=max_batch_size, embedding_dim=embedding_dim, unet_dim=(9 if inpaint else 4), ) class VAE(BaseModel): def __init__(self, model, device, max_batch_size, embedding_dim): super(VAE, self).__init__( model=model, device=device, max_batch_size=max_batch_size, embedding_dim=embedding_dim ) self.name = "VAE decoder" def get_input_names(self): return ["latent"] def get_output_names(self): return ["images"] def get_dynamic_axes(self): return {"latent": {0: "B", 2: "H", 3: "W"}, "images": {0: "B", 2: "8H", 3: "8W"}} def get_input_profile(self, batch_size, image_height, image_width, static_batch, static_shape): latent_height, latent_width = self.check_dims(batch_size, image_height, image_width) ( min_batch, max_batch, _, _, _, _, min_latent_height, max_latent_height, min_latent_width, max_latent_width, ) = self.get_minmax_dims(batch_size, image_height, image_width, static_batch, static_shape) return { "latent": [ (min_batch, 4, min_latent_height, min_latent_width), (batch_size, 4, latent_height, latent_width), (max_batch, 4, max_latent_height, max_latent_width), ] } def get_shape_dict(self, batch_size, image_height, image_width): latent_height, latent_width = self.check_dims(batch_size, image_height, image_width) return { "latent": (batch_size, 4, latent_height, latent_width), "images": (batch_size, 3, image_height, image_width), } def get_sample_input(self, batch_size, image_height, image_width): latent_height, latent_width = self.check_dims(batch_size, image_height, image_width) return torch.randn(batch_size, 4, latent_height, latent_width, dtype=torch.float32, device=self.device) 
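# Illustrative note (not part of the original file): with the defaults used by build_engines()
# (opt_batch_size=1, static_shape=True, static_batch=False) and the class limits above
# (min_batch=1, max_batch=16), a 512x512 image gives 64x64 latents, so VAE.get_input_profile()
# resolves to
#     {"latent": [(1, 4, 64, 64), (1, 4, 64, 64), (16, 4, 64, 64)]}
# i.e. (min, opt, max) shapes for the binding, which Engine.build() turns into a TensorRT
# optimization profile via p.add("latent", min=dims[0], opt=dims[1], max=dims[2]).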
def make_VAE(model, device, max_batch_size, embedding_dim, inpaint=False): return VAE(model, device=device, max_batch_size=max_batch_size, embedding_dim=embedding_dim) class TorchVAEEncoder(torch.nn.Module): def __init__(self, model): super().__init__() self.vae_encoder = model def forward(self, x): return retrieve_latents(self.vae_encoder.encode(x)) class VAEEncoder(BaseModel): def __init__(self, model, device, max_batch_size, embedding_dim): super(VAEEncoder, self).__init__( model=model, device=device, max_batch_size=max_batch_size, embedding_dim=embedding_dim ) self.name = "VAE encoder" def get_model(self): vae_encoder = TorchVAEEncoder(self.model) return vae_encoder def get_input_names(self): return ["images"] def get_output_names(self): return ["latent"] def get_dynamic_axes(self): return {"images": {0: "B", 2: "8H", 3: "8W"}, "latent": {0: "B", 2: "H", 3: "W"}} def get_input_profile(self, batch_size, image_height, image_width, static_batch, static_shape): assert batch_size >= self.min_batch and batch_size <= self.max_batch min_batch = batch_size if static_batch else self.min_batch max_batch = batch_size if static_batch else self.max_batch self.check_dims(batch_size, image_height, image_width) ( min_batch, max_batch, min_image_height, max_image_height, min_image_width, max_image_width, _, _, _, _, ) = self.get_minmax_dims(batch_size, image_height, image_width, static_batch, static_shape) return { "images": [ (min_batch, 3, min_image_height, min_image_width), (batch_size, 3, image_height, image_width), (max_batch, 3, max_image_height, max_image_width), ] } def get_shape_dict(self, batch_size, image_height, image_width): latent_height, latent_width = self.check_dims(batch_size, image_height, image_width) return { "images": (batch_size, 3, image_height, image_width), "latent": (batch_size, 4, latent_height, latent_width), } def get_sample_input(self, batch_size, image_height, image_width): self.check_dims(batch_size, image_height, image_width) return torch.randn(batch_size, 3, image_height, image_width, dtype=torch.float32, device=self.device) def make_VAEEncoder(model, device, max_batch_size, embedding_dim, inpaint=False): return VAEEncoder(model, device=device, max_batch_size=max_batch_size, embedding_dim=embedding_dim) class TensorRTStableDiffusionImg2ImgPipeline(DiffusionPipeline): r""" Pipeline for image-to-image generation using TensorRT accelerated Stable Diffusion. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) Args: vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. text_encoder ([`CLIPTextModel`]): Frozen text-encoder. Stable Diffusion uses the text portion of [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. tokenizer (`CLIPTokenizer`): Tokenizer of class [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. 
safety_checker ([`StableDiffusionSafetyChecker`]): Classification module that estimates whether generated images could be considered offensive or harmful. Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details. feature_extractor ([`CLIPImageProcessor`]): Model that extracts features from generated images to be used as inputs for the `safety_checker`. """ _optional_components = ["safety_checker", "feature_extractor", "image_encoder"] def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: DDIMScheduler, safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, image_encoder: CLIPVisionModelWithProjection = None, requires_safety_checker: bool = True, stages=["clip", "unet", "vae", "vae_encoder"], image_height: int = 512, image_width: int = 512, max_batch_size: int = 16, # ONNX export parameters onnx_opset: int = 17, onnx_dir: str = "onnx", # TensorRT engine build parameters engine_dir: str = "engine", force_engine_rebuild: bool = False, timing_cache: str = "timing_cache", ): super().__init__() if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1: deprecation_message = ( f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`" f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure " "to update the config accordingly as leaving `steps_offset` might led to incorrect results" " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub," " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`" " file" ) deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config["steps_offset"] = 1 scheduler._internal_dict = FrozenDict(new_config) if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True: deprecation_message = ( f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`." " `clip_sample` should be set to False in the configuration file. Please make sure to update the" " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in" " future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very" " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file" ) deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config["clip_sample"] = False scheduler._internal_dict = FrozenDict(new_config) if safety_checker is None and requires_safety_checker: logger.warning( f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" " results in services or applications open to the public. Both the diffusers team and Hugging Face" " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" " it only for use-cases that involve analyzing network behavior or auditing its results. For more" " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." 
) if safety_checker is not None and feature_extractor is None: raise ValueError( "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." ) is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse( version.parse(unet.config._diffusers_version).base_version ) < version.parse("0.9.0.dev0") is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64 if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64: deprecation_message = ( "The configuration file of the unet has set the default `sample_size` to smaller than" " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the" " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-" " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5" " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the" " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`" " in the config might lead to incorrect results in future versions. If you have downloaded this" " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for" " the `unet/config.json` file" ) deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(unet.config) new_config["sample_size"] = 64 unet._internal_dict = FrozenDict(new_config) self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, image_encoder=image_encoder, ) self.stages = stages self.image_height, self.image_width = image_height, image_width self.inpaint = False self.onnx_opset = onnx_opset self.onnx_dir = onnx_dir self.engine_dir = engine_dir self.force_engine_rebuild = force_engine_rebuild self.timing_cache = timing_cache self.build_static_batch = False self.build_dynamic_shape = False self.max_batch_size = max_batch_size # TODO: Restrict batch size to 4 for larger image dimensions as a WAR for TensorRT limitation. 
if self.build_dynamic_shape or self.image_height > 512 or self.image_width > 512: self.max_batch_size = 4 self.stream = None # loaded in loadResources() self.models = {} # loaded in __loadModels() self.engine = {} # loaded in build_engines() self.vae.forward = self.vae.decode self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) self.register_to_config(requires_safety_checker=requires_safety_checker) def __loadModels(self): # Load pipeline models self.embedding_dim = self.text_encoder.config.hidden_size models_args = { "device": self.torch_device, "max_batch_size": self.max_batch_size, "embedding_dim": self.embedding_dim, "inpaint": self.inpaint, } if "clip" in self.stages: self.models["clip"] = make_CLIP(self.text_encoder, **models_args) if "unet" in self.stages: self.models["unet"] = make_UNet(self.unet, **models_args) if "vae" in self.stages: self.models["vae"] = make_VAE(self.vae, **models_args) if "vae_encoder" in self.stages: self.models["vae_encoder"] = make_VAEEncoder(self.vae, **models_args) # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker def run_safety_checker( self, image: Union[torch.Tensor, PIL.Image.Image], device: torch.device, dtype: torch.dtype ) -> Tuple[Union[torch.Tensor, PIL.Image.Image], Optional[bool]]: r""" Runs the safety checker on the given image. Args: image (Union[torch.Tensor, PIL.Image.Image]): The input image to be checked. device (torch.device): The device to run the safety checker on. dtype (torch.dtype): The data type of the input image. Returns: (image, has_nsfw_concept) Tuple[Union[torch.Tensor, PIL.Image.Image], Optional[bool]]: A tuple containing the processed image and a boolean indicating whether the image has a NSFW (Not Safe for Work) concept. 
""" if self.safety_checker is None: has_nsfw_concept = None else: if torch.is_tensor(image): feature_extractor_input = self.image_processor.postprocess(image, output_type="pil") else: feature_extractor_input = self.image_processor.numpy_to_pil(image) safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device) image, has_nsfw_concept = self.safety_checker( images=image, clip_input=safety_checker_input.pixel_values.to(dtype) ) return image, has_nsfw_concept @classmethod @validate_hf_hub_args def set_cached_folder(cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], **kwargs): cache_dir = kwargs.pop("cache_dir", None) proxies = kwargs.pop("proxies", None) local_files_only = kwargs.pop("local_files_only", False) token = kwargs.pop("token", None) revision = kwargs.pop("revision", None) cls.cached_folder = ( pretrained_model_name_or_path if os.path.isdir(pretrained_model_name_or_path) else snapshot_download( pretrained_model_name_or_path, cache_dir=cache_dir, proxies=proxies, local_files_only=local_files_only, token=token, revision=revision, ) ) def to(self, torch_device: Optional[Union[str, torch.device]] = None, silence_dtype_warnings: bool = False): super().to(torch_device, silence_dtype_warnings=silence_dtype_warnings) self.onnx_dir = os.path.join(self.cached_folder, self.onnx_dir) self.engine_dir = os.path.join(self.cached_folder, self.engine_dir) self.timing_cache = os.path.join(self.cached_folder, self.timing_cache) # set device self.torch_device = self._execution_device logger.warning(f"Running inference on device: {self.torch_device}") # load models self.__loadModels() # build engines self.engine = build_engines( self.models, self.engine_dir, self.onnx_dir, self.onnx_opset, opt_image_height=self.image_height, opt_image_width=self.image_width, force_engine_rebuild=self.force_engine_rebuild, static_batch=self.build_static_batch, static_shape=not self.build_dynamic_shape, timing_cache=self.timing_cache, ) return self def __initialize_timesteps(self, timesteps, strength): self.scheduler.set_timesteps(timesteps) offset = self.scheduler.steps_offset if hasattr(self.scheduler, "steps_offset") else 0 init_timestep = int(timesteps * strength) + offset init_timestep = min(init_timestep, timesteps) t_start = max(timesteps - init_timestep + offset, 0) timesteps = self.scheduler.timesteps[t_start:].to(self.torch_device) return timesteps, t_start def __preprocess_images(self, batch_size, images=()): init_images = [] for image in images: image = image.to(self.torch_device).float() image = image.repeat(batch_size, 1, 1, 1) init_images.append(image) return tuple(init_images) def __encode_image(self, init_image): init_latents = runEngine(self.engine["vae_encoder"], {"images": init_image}, self.stream)["latent"] init_latents = 0.18215 * init_latents return init_latents def __encode_prompt(self, prompt, negative_prompt): r""" Encodes the prompt into text encoder hidden states. Args: prompt (`str` or `List[str]`, *optional*): prompt to be encoded negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds`. instead. If not defined, one has to pass `negative_prompt_embeds`. instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). 
""" # Tokenize prompt text_input_ids = ( self.tokenizer( prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) .input_ids.type(torch.int32) .to(self.torch_device) ) # NOTE: output tensor for CLIP must be cloned because it will be overwritten when called again for negative prompt text_embeddings = runEngine(self.engine["clip"], {"input_ids": text_input_ids}, self.stream)[ "text_embeddings" ].clone() # Tokenize negative prompt uncond_input_ids = ( self.tokenizer( negative_prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) .input_ids.type(torch.int32) .to(self.torch_device) ) uncond_embeddings = runEngine(self.engine["clip"], {"input_ids": uncond_input_ids}, self.stream)[ "text_embeddings" ] # Concatenate the unconditional and text embeddings into a single batch to avoid doing two forward passes for classifier free guidance text_embeddings = torch.cat([uncond_embeddings, text_embeddings]).to(dtype=torch.float16) return text_embeddings def __denoise_latent( self, latents, text_embeddings, timesteps=None, step_offset=0, mask=None, masked_image_latents=None ): if not isinstance(timesteps, torch.Tensor): timesteps = self.scheduler.timesteps for step_index, timestep in enumerate(timesteps): # Expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([latents] * 2) latent_model_input = self.scheduler.scale_model_input(latent_model_input, timestep) if isinstance(mask, torch.Tensor): latent_model_input = torch.cat([latent_model_input, mask, masked_image_latents], dim=1) # Predict the noise residual timestep_float = timestep.float() if timestep.dtype != torch.float32 else timestep noise_pred = runEngine( self.engine["unet"], {"sample": latent_model_input, "timestep": timestep_float, "encoder_hidden_states": text_embeddings}, self.stream, )["latent"] # Perform guidance noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + self._guidance_scale * (noise_pred_text - noise_pred_uncond) latents = self.scheduler.step(noise_pred, timestep, latents).prev_sample latents = 1.0 / 0.18215 * latents return latents def __decode_latent(self, latents): images = runEngine(self.engine["vae"], {"latent": latents}, self.stream)["images"] images = (images / 2 + 0.5).clamp(0, 1) return images.cpu().permute(0, 2, 3, 1).float().numpy() def __loadResources(self, image_height, image_width, batch_size): self.stream = cudart.cudaStreamCreate()[1] # Allocate buffers for TensorRT engine bindings for model_name, obj in self.models.items(): self.engine[model_name].allocate_buffers( shape_dict=obj.get_shape_dict(batch_size, image_height, image_width), device=self.torch_device ) @torch.no_grad() def __call__( self, prompt: Union[str, List[str]] = None, image: Union[torch.Tensor, PIL.Image.Image] = None, strength: float = 0.8, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, ): r""" Function invoked when calling the pipeline for generation. Args: prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. instead. image (`PIL.Image.Image`): `Image`, or tensor representing an image batch which will be inpainted, *i.e.* parts of the image will be masked out with `mask_image` and repainted according to `prompt`. 
strength (`float`, *optional*, defaults to 0.8): Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1. `image` will be used as a starting point, adding more noise to it the larger the `strength`. The number of denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise will be maximum and the denoising process will run for the full number of iterations specified in `num_inference_steps`. A value of 1, therefore, essentially ignores `image`. num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. guidance_scale (`float`, *optional*, defaults to 7.5): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). `guidance_scale` is defined as `w` of equation 2. of [Imagen Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds`. instead. If not defined, one has to pass `negative_prompt_embeds`. instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). generator (`torch.Generator` or `List[torch.Generator]`, *optional*): One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. """ self.generator = generator self.denoising_steps = num_inference_steps self._guidance_scale = guidance_scale # Pre-compute latent input scales and linear multistep coefficients self.scheduler.set_timesteps(self.denoising_steps, device=self.torch_device) # Define call parameters if prompt is not None and isinstance(prompt, str): batch_size = 1 prompt = [prompt] elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: raise ValueError(f"Expected prompt to be of type list or str but got {type(prompt)}") if negative_prompt is None: negative_prompt = [""] * batch_size if negative_prompt is not None and isinstance(negative_prompt, str): negative_prompt = [negative_prompt] assert len(prompt) == len(negative_prompt) if batch_size > self.max_batch_size: raise ValueError( f"Batch size {len(prompt)} is larger than allowed {self.max_batch_size}. 
If dynamic shape is used, then maximum batch size is 4" ) # load resources self.__loadResources(self.image_height, self.image_width, batch_size) with torch.inference_mode(), torch.autocast("cuda"), trt.Runtime(TRT_LOGGER): # Initialize timesteps timesteps, t_start = self.__initialize_timesteps(self.denoising_steps, strength) latent_timestep = timesteps[:1].repeat(batch_size) # Pre-process input image if isinstance(image, PIL.Image.Image): image = preprocess_image(image) init_image = self.__preprocess_images(batch_size, (image,))[0] # VAE encode init image init_latents = self.__encode_image(init_image) # Add noise to latents using timesteps noise = torch.randn( init_latents.shape, generator=self.generator, device=self.torch_device, dtype=torch.float32 ) latents = self.scheduler.add_noise(init_latents, noise, latent_timestep) # CLIP text encoder text_embeddings = self.__encode_prompt(prompt, negative_prompt) # UNet denoiser latents = self.__denoise_latent(latents, text_embeddings, timesteps=timesteps, step_offset=t_start) # VAE decode latent images = self.__decode_latent(latents) images, has_nsfw_concept = self.run_safety_checker(images, self.torch_device, text_embeddings.dtype) images = self.numpy_to_pil(images) return StableDiffusionPipelineOutput(images=images, nsfw_content_detected=has_nsfw_concept)
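# ---------------------------------------------------------------------------
# Usage sketch (added for illustration; not part of the original pipeline file).
# The checkpoint id, file names, and prompt are placeholders; the TensorRT /
# polygraphy / onnx-graphsurgeon packages from the installation notes above must
# be installed, and the first run exports ONNX and builds the engines, which can
# take a while. The pipeline defaults to 512x512, so the input image is resized
# to match. It relies on the module-level imports (PIL, DDIMScheduler,
# DiffusionPipeline) already present in this file.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    model_id = "stabilityai/stable-diffusion-2-1"  # illustrative checkpoint
    scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
    pipe = DiffusionPipeline.from_pretrained(
        model_id,
        custom_pipeline="stable_diffusion_tensorrt_img2img",
        scheduler=scheduler,
    )
    pipe.set_cached_folder(model_id)  # resolve/download the weights used for ONNX export
    pipe = pipe.to("cuda")  # exports ONNX and builds/loads the TensorRT engines

    init_image = PIL.Image.open("input.png").convert("RGB").resize((512, 512))  # placeholder image
    image = pipe(
        prompt="a fantasy landscape, detailed oil painting",
        image=init_image,
        strength=0.75,
        num_inference_steps=50,
    ).images[0]
    image.save("tensorrt_img2img_out.png")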
diffusers/examples/community/stable_diffusion_tensorrt_img2img.py
{ "file_path": "diffusers/examples/community/stable_diffusion_tensorrt_img2img.py", "repo_id": "diffusers", "token_count": 21784 }
121
#!/usr/bin/env python # coding=utf-8 # Copyright 2024 The LCM team and the HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and import argparse import copy import functools import gc import logging import math import os import random import shutil from contextlib import nullcontext from pathlib import Path import accelerate import numpy as np import torch import torch.nn.functional as F import torch.utils.checkpoint import transformers from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import ProjectConfiguration, set_seed from datasets import load_dataset from huggingface_hub import create_repo, upload_folder from packaging import version from peft import LoraConfig, get_peft_model_state_dict, set_peft_model_state_dict from torchvision import transforms from torchvision.transforms.functional import crop from tqdm.auto import tqdm from transformers import AutoTokenizer, PretrainedConfig import diffusers from diffusers import ( AutoencoderKL, DDPMScheduler, LCMScheduler, StableDiffusionXLPipeline, UNet2DConditionModel, ) from diffusers.optimization import get_scheduler from diffusers.training_utils import cast_training_params, resolve_interpolation_mode from diffusers.utils import ( check_min_version, convert_state_dict_to_diffusers, convert_unet_state_dict_to_peft, is_wandb_available, ) from diffusers.utils.import_utils import is_xformers_available if is_wandb_available(): import wandb # Will error if the minimal version of diffusers is not installed. Remove at your own risks. check_min_version("0.31.0.dev0") logger = get_logger(__name__) DATASET_NAME_MAPPING = { "lambdalabs/naruto-blip-captions": ("image", "text"), } class DDIMSolver: def __init__(self, alpha_cumprods, timesteps=1000, ddim_timesteps=50): # DDIM sampling parameters step_ratio = timesteps // ddim_timesteps self.ddim_timesteps = (np.arange(1, ddim_timesteps + 1) * step_ratio).round().astype(np.int64) - 1 self.ddim_alpha_cumprods = alpha_cumprods[self.ddim_timesteps] self.ddim_alpha_cumprods_prev = np.asarray( [alpha_cumprods[0]] + alpha_cumprods[self.ddim_timesteps[:-1]].tolist() ) # convert to torch tensors self.ddim_timesteps = torch.from_numpy(self.ddim_timesteps).long() self.ddim_alpha_cumprods = torch.from_numpy(self.ddim_alpha_cumprods) self.ddim_alpha_cumprods_prev = torch.from_numpy(self.ddim_alpha_cumprods_prev) def to(self, device): self.ddim_timesteps = self.ddim_timesteps.to(device) self.ddim_alpha_cumprods = self.ddim_alpha_cumprods.to(device) self.ddim_alpha_cumprods_prev = self.ddim_alpha_cumprods_prev.to(device) return self def ddim_step(self, pred_x0, pred_noise, timestep_index): alpha_cumprod_prev = extract_into_tensor(self.ddim_alpha_cumprods_prev, timestep_index, pred_x0.shape) dir_xt = (1.0 - alpha_cumprod_prev).sqrt() * pred_noise x_prev = alpha_cumprod_prev.sqrt() * pred_x0 + dir_xt return x_prev def log_validation(vae, args, accelerator, weight_dtype, step, unet=None, is_final_validation=False): logger.info("Running validation... 
") pipeline = StableDiffusionXLPipeline.from_pretrained( args.pretrained_teacher_model, vae=vae, scheduler=LCMScheduler.from_pretrained(args.pretrained_teacher_model, subfolder="scheduler"), revision=args.revision, torch_dtype=weight_dtype, ).to(accelerator.device) pipeline.set_progress_bar_config(disable=True) to_load = None if not is_final_validation: if unet is None: raise ValueError("Must provide a `unet` when doing intermediate validation.") unet = accelerator.unwrap_model(unet) state_dict = convert_state_dict_to_diffusers(get_peft_model_state_dict(unet)) to_load = state_dict else: to_load = args.output_dir pipeline.load_lora_weights(to_load) pipeline.fuse_lora() if args.enable_xformers_memory_efficient_attention: pipeline.enable_xformers_memory_efficient_attention() if args.seed is None: generator = None else: generator = torch.Generator(device=accelerator.device).manual_seed(args.seed) validation_prompts = [ "cute sundar pichai character", "robotic cat with wings", "a photo of yoda", "a cute creature with blue eyes", ] image_logs = [] for _, prompt in enumerate(validation_prompts): images = [] if torch.backends.mps.is_available(): autocast_ctx = nullcontext() else: autocast_ctx = torch.autocast(accelerator.device.type, dtype=weight_dtype) with autocast_ctx: images = pipeline( prompt=prompt, num_inference_steps=4, num_images_per_prompt=4, generator=generator, guidance_scale=0.0, ).images image_logs.append({"validation_prompt": prompt, "images": images}) for tracker in accelerator.trackers: if tracker.name == "tensorboard": for log in image_logs: images = log["images"] validation_prompt = log["validation_prompt"] formatted_images = [] for image in images: formatted_images.append(np.asarray(image)) formatted_images = np.stack(formatted_images) tracker.writer.add_images(validation_prompt, formatted_images, step, dataformats="NHWC") elif tracker.name == "wandb": formatted_images = [] for log in image_logs: images = log["images"] validation_prompt = log["validation_prompt"] for image in images: image = wandb.Image(image, caption=validation_prompt) formatted_images.append(image) logger_name = "test" if is_final_validation else "validation" tracker.log({logger_name: formatted_images}) else: logger.warning(f"image logging not implemented for {tracker.name}") del pipeline gc.collect() torch.cuda.empty_cache() return image_logs def append_dims(x, target_dims): """Appends dimensions to the end of a tensor until it has target_dims dimensions.""" dims_to_append = target_dims - x.ndim if dims_to_append < 0: raise ValueError(f"input has {x.ndim} dims but target_dims is {target_dims}, which is less") return x[(...,) + (None,) * dims_to_append] # From LCMScheduler.get_scalings_for_boundary_condition_discrete def scalings_for_boundary_conditions(timestep, sigma_data=0.5, timestep_scaling=10.0): scaled_timestep = timestep_scaling * timestep c_skip = sigma_data**2 / (scaled_timestep**2 + sigma_data**2) c_out = scaled_timestep / (scaled_timestep**2 + sigma_data**2) ** 0.5 return c_skip, c_out # Compare LCMScheduler.step, Step 4 def get_predicted_original_sample(model_output, timesteps, sample, prediction_type, alphas, sigmas): alphas = extract_into_tensor(alphas, timesteps, sample.shape) sigmas = extract_into_tensor(sigmas, timesteps, sample.shape) if prediction_type == "epsilon": pred_x_0 = (sample - sigmas * model_output) / alphas elif prediction_type == "sample": pred_x_0 = model_output elif prediction_type == "v_prediction": pred_x_0 = alphas * sample - sigmas * model_output else: raise ValueError( 
f"Prediction type {prediction_type} is not supported; currently, `epsilon`, `sample`, and `v_prediction`" f" are supported." ) return pred_x_0 # Based on step 4 in DDIMScheduler.step def get_predicted_noise(model_output, timesteps, sample, prediction_type, alphas, sigmas): alphas = extract_into_tensor(alphas, timesteps, sample.shape) sigmas = extract_into_tensor(sigmas, timesteps, sample.shape) if prediction_type == "epsilon": pred_epsilon = model_output elif prediction_type == "sample": pred_epsilon = (sample - alphas * model_output) / sigmas elif prediction_type == "v_prediction": pred_epsilon = alphas * model_output + sigmas * sample else: raise ValueError( f"Prediction type {prediction_type} is not supported; currently, `epsilon`, `sample`, and `v_prediction`" f" are supported." ) return pred_epsilon def extract_into_tensor(a, t, x_shape): b, *_ = t.shape out = a.gather(-1, t) return out.reshape(b, *((1,) * (len(x_shape) - 1))) def import_model_class_from_model_name_or_path( pretrained_model_name_or_path: str, revision: str, subfolder: str = "text_encoder" ): text_encoder_config = PretrainedConfig.from_pretrained( pretrained_model_name_or_path, subfolder=subfolder, revision=revision ) model_class = text_encoder_config.architectures[0] if model_class == "CLIPTextModel": from transformers import CLIPTextModel return CLIPTextModel elif model_class == "CLIPTextModelWithProjection": from transformers import CLIPTextModelWithProjection return CLIPTextModelWithProjection else: raise ValueError(f"{model_class} is not supported.") def parse_args(): parser = argparse.ArgumentParser(description="Simple example of a training script.") # ----------Model Checkpoint Loading Arguments---------- parser.add_argument( "--pretrained_teacher_model", type=str, default=None, required=True, help="Path to pretrained LDM teacher model or model identifier from huggingface.co/models.", ) parser.add_argument( "--pretrained_vae_model_name_or_path", type=str, default=None, help="Path to pretrained VAE model with better numerical stability. More details: https://github.com/huggingface/diffusers/pull/4038.", ) parser.add_argument( "--teacher_revision", type=str, default=None, required=False, help="Revision of pretrained LDM teacher model identifier from huggingface.co/models.", ) parser.add_argument( "--revision", type=str, default=None, required=False, help="Revision of pretrained LDM model identifier from huggingface.co/models.", ) # ----------Training Arguments---------- # ----General Training Arguments---- parser.add_argument( "--output_dir", type=str, default="lcm-xl-distilled", help="The output directory where the model predictions and checkpoints will be written.", ) parser.add_argument( "--cache_dir", type=str, default=None, help="The directory where the downloaded models and datasets will be stored.", ) parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") # ----Logging---- parser.add_argument( "--logging_dir", type=str, default="logs", help=( "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to" " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***." ), ) parser.add_argument( "--report_to", type=str, default="tensorboard", help=( 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`' ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.' 
), ) # ----Checkpointing---- parser.add_argument( "--checkpointing_steps", type=int, default=500, help=( "Save a checkpoint of the training state every X updates. These checkpoints are only suitable for resuming" " training using `--resume_from_checkpoint`." ), ) parser.add_argument( "--checkpoints_total_limit", type=int, default=None, help=("Max number of checkpoints to store."), ) parser.add_argument( "--resume_from_checkpoint", type=str, default=None, help=( "Whether training should be resumed from a previous checkpoint. Use a path saved by" ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.' ), ) # ----Image Processing---- parser.add_argument( "--dataset_name", type=str, default=None, help=( "The name of the Dataset (from the HuggingFace hub) to train on (could be your own, possibly private," " dataset). It can also be a path pointing to a local copy of a dataset in your filesystem," " or to a folder containing files that 🤗 Datasets can understand." ), ) parser.add_argument( "--dataset_config_name", type=str, default=None, help="The config of the Dataset, leave as None if there's only one config.", ) parser.add_argument( "--train_data_dir", type=str, default=None, help=( "A folder containing the training data. Folder contents must follow the structure described in" " https://huggingface.co/docs/datasets/image_dataset#imagefolder. In particular, a `metadata.jsonl` file" " must exist to provide the captions for the images. Ignored if `dataset_name` is specified." ), ) parser.add_argument( "--image_column", type=str, default="image", help="The column of the dataset containing an image." ) parser.add_argument( "--caption_column", type=str, default="text", help="The column of the dataset containing a caption or a list of captions.", ) parser.add_argument( "--resolution", type=int, default=1024, help=( "The resolution for input images, all the images in the train/validation dataset will be resized to this" " resolution" ), ) parser.add_argument( "--interpolation_type", type=str, default="bilinear", help=( "The interpolation function used when resizing images to the desired resolution. Choose between `bilinear`," " `bicubic`, `box`, `nearest`, `nearest_exact`, `hamming`, and `lanczos`." ), ) parser.add_argument( "--center_crop", default=False, action="store_true", help=( "Whether to center crop the input images to the resolution. If not set, the images will be randomly" " cropped. The images will be resized to the resolution first before cropping." ), ) parser.add_argument( "--random_flip", action="store_true", help="whether to randomly flip images horizontally", ) parser.add_argument( "--encode_batch_size", type=int, default=8, help="Batch size to use for VAE encoding of the images for efficient processing.", ) # ----Dataloader---- parser.add_argument( "--dataloader_num_workers", type=int, default=0, help=( "Number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process." ), ) # ----Batch Size and Training Steps---- parser.add_argument( "--train_batch_size", type=int, default=16, help="Batch size (per device) for the training dataloader." ) parser.add_argument("--num_train_epochs", type=int, default=100) parser.add_argument( "--max_train_steps", type=int, default=None, help="Total number of training steps to perform. 
If provided, overrides num_train_epochs.", ) parser.add_argument( "--max_train_samples", type=int, default=None, help=( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ), ) # ----Learning Rate---- parser.add_argument( "--learning_rate", type=float, default=1e-6, help="Initial learning rate (after the potential warmup period) to use.", ) parser.add_argument( "--scale_lr", action="store_true", default=False, help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.", ) parser.add_argument( "--lr_scheduler", type=str, default="constant", help=( 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",' ' "constant", "constant_with_warmup"]' ), ) parser.add_argument( "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler." ) parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) # ----Optimizer (Adam)---- parser.add_argument( "--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes." ) parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.") parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.") parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.") parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer") parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") # ----Diffusion Training Arguments---- # ----Latent Consistency Distillation (LCD) Specific Arguments---- parser.add_argument( "--w_min", type=float, default=3.0, required=False, help=( "The minimum guidance scale value for guidance scale sampling. Note that we are using the Imagen CFG" " formulation rather than the LCM formulation, which means all guidance scales have 1 added to them as" " compared to the original paper." ), ) parser.add_argument( "--w_max", type=float, default=15.0, required=False, help=( "The maximum guidance scale value for guidance scale sampling. Note that we are using the Imagen CFG" " formulation rather than the LCM formulation, which means all guidance scales have 1 added to them as" " compared to the original paper." ), ) parser.add_argument( "--num_ddim_timesteps", type=int, default=50, help="The number of timesteps to use for DDIM sampling.", ) parser.add_argument( "--loss_type", type=str, default="l2", choices=["l2", "huber"], help="The type of loss to use for the LCD loss.", ) parser.add_argument( "--huber_c", type=float, default=0.001, help="The huber loss parameter. Only used if `--loss_type=huber`.", ) parser.add_argument( "--lora_rank", type=int, default=64, help="The rank of the LoRA projection matrix.", ) parser.add_argument( "--lora_alpha", type=int, default=64, help=( "The value of the LoRA alpha parameter, which controls the scaling factor in front of the LoRA weight" " update delta_W. No scaling will be performed if this value is equal to `lora_rank`." 
), ) parser.add_argument( "--lora_dropout", type=float, default=0.0, help="The dropout probability for the dropout layer added before applying the LoRA to each layer input.", ) parser.add_argument( "--lora_target_modules", type=str, default=None, help=( "A comma-separated string of target module keys to add LoRA to. If not set, a default list of modules will" " be used. By default, LoRA will be applied to all conv and linear layers." ), ) parser.add_argument( "--vae_encode_batch_size", type=int, default=8, required=False, help=( "The batch size used when encoding (and decoding) images to latents (and vice versa) using the VAE." " Encoding or decoding the whole batch at once may run into OOM issues." ), ) parser.add_argument( "--timestep_scaling_factor", type=float, default=10.0, help=( "The multiplicative timestep scaling factor used when calculating the boundary scalings for LCM. The" " higher the scaling is, the lower the approximation error, but the default value of 10.0 should typically" " suffice." ), ) # ----Mixed Precision---- parser.add_argument( "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16"], help=( "Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >=" " 1.10.and an Nvidia Ampere GPU. Default to the value of accelerate config of the current system or the" " flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config." ), ) parser.add_argument( "--allow_tf32", action="store_true", help=( "Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. For more information, see" " https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices" ), ) # ----Training Optimizations---- parser.add_argument( "--enable_xformers_memory_efficient_attention", action="store_true", help="Whether or not to use xformers." 
) parser.add_argument( "--gradient_checkpointing", action="store_true", help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.", ) # ----Distributed Training---- parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") # ----------Validation Arguments---------- parser.add_argument( "--validation_steps", type=int, default=200, help="Run validation every X steps.", ) # ----------Huggingface Hub Arguments----------- parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.") parser.add_argument( "--hub_model_id", type=str, default=None, help="The name of the repository to keep in sync with the local `output_dir`.", ) # ----------Accelerate Arguments---------- parser.add_argument( "--tracker_project_name", type=str, default="text2image-fine-tune", help=( "The `project_name` argument passed to Accelerator.init_trackers for" " more information see https://huggingface.co/docs/accelerate/v0.17.0/en/package_reference/accelerator#accelerate.Accelerator" ), ) args = parser.parse_args() env_local_rank = int(os.environ.get("LOCAL_RANK", -1)) if env_local_rank != -1 and env_local_rank != args.local_rank: args.local_rank = env_local_rank return args # Adapted from pipelines.StableDiffusionXLPipeline.encode_prompt def encode_prompt(prompt_batch, text_encoders, tokenizers, is_train=True): prompt_embeds_list = [] captions = [] for caption in prompt_batch: if isinstance(caption, str): captions.append(caption) elif isinstance(caption, (list, np.ndarray)): # take a random caption if there are multiple captions.append(random.choice(caption) if is_train else caption[0]) with torch.no_grad(): for tokenizer, text_encoder in zip(tokenizers, text_encoders): text_inputs = tokenizer( captions, padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids prompt_embeds = text_encoder( text_input_ids.to(text_encoder.device), output_hidden_states=True, ) # We are only ALWAYS interested in the pooled output of the final text encoder pooled_prompt_embeds = prompt_embeds[0] prompt_embeds = prompt_embeds.hidden_states[-2] bs_embed, seq_len, _ = prompt_embeds.shape prompt_embeds = prompt_embeds.view(bs_embed, seq_len, -1) prompt_embeds_list.append(prompt_embeds) prompt_embeds = torch.concat(prompt_embeds_list, dim=-1) pooled_prompt_embeds = pooled_prompt_embeds.view(bs_embed, -1) return prompt_embeds, pooled_prompt_embeds def main(args): if args.report_to == "wandb" and args.hub_token is not None: raise ValueError( "You cannot use both --report_to=wandb and --hub_token due to a security risk of exposing your token." " Please use `huggingface-cli login` to authenticate with the Hub." ) logging_dir = Path(args.output_dir, args.logging_dir) accelerator_project_config = ProjectConfiguration(project_dir=args.output_dir, logging_dir=logging_dir) accelerator = Accelerator( gradient_accumulation_steps=args.gradient_accumulation_steps, mixed_precision=args.mixed_precision, log_with=args.report_to, project_config=accelerator_project_config, split_batches=True, # It's important to set this to True when using webdataset to get the right number of steps for lr scheduling. 
If set to False, the number of steps will be devide by the number of processes assuming batches are multiplied by the number of processes ) # Make one log on every process with the configuration for debugging. logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) logger.info(accelerator.state, main_process_only=False) if accelerator.is_local_main_process: transformers.utils.logging.set_verbosity_warning() diffusers.utils.logging.set_verbosity_info() else: transformers.utils.logging.set_verbosity_error() diffusers.utils.logging.set_verbosity_error() # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed) # Handle the repository creation if accelerator.is_main_process: if args.output_dir is not None: os.makedirs(args.output_dir, exist_ok=True) if args.push_to_hub: repo_id = create_repo( repo_id=args.hub_model_id or Path(args.output_dir).name, exist_ok=True, token=args.hub_token, private=True, ).repo_id # 1. Create the noise scheduler and the desired noise schedule. noise_scheduler = DDPMScheduler.from_pretrained( args.pretrained_teacher_model, subfolder="scheduler", revision=args.teacher_revision ) # DDPMScheduler calculates the alpha and sigma noise schedules (based on the alpha bars) for us alpha_schedule = torch.sqrt(noise_scheduler.alphas_cumprod) sigma_schedule = torch.sqrt(1 - noise_scheduler.alphas_cumprod) # Initialize the DDIM ODE solver for distillation. solver = DDIMSolver( noise_scheduler.alphas_cumprod.numpy(), timesteps=noise_scheduler.config.num_train_timesteps, ddim_timesteps=args.num_ddim_timesteps, ) # 2. Load tokenizers from SDXL checkpoint. tokenizer_one = AutoTokenizer.from_pretrained( args.pretrained_teacher_model, subfolder="tokenizer", revision=args.teacher_revision, use_fast=False ) tokenizer_two = AutoTokenizer.from_pretrained( args.pretrained_teacher_model, subfolder="tokenizer_2", revision=args.teacher_revision, use_fast=False ) # 3. Load text encoders from SDXL checkpoint. # import correct text encoder classes text_encoder_cls_one = import_model_class_from_model_name_or_path( args.pretrained_teacher_model, args.teacher_revision ) text_encoder_cls_two = import_model_class_from_model_name_or_path( args.pretrained_teacher_model, args.teacher_revision, subfolder="text_encoder_2" ) text_encoder_one = text_encoder_cls_one.from_pretrained( args.pretrained_teacher_model, subfolder="text_encoder", revision=args.teacher_revision ) text_encoder_two = text_encoder_cls_two.from_pretrained( args.pretrained_teacher_model, subfolder="text_encoder_2", revision=args.teacher_revision ) # 4. Load VAE from SDXL checkpoint (or more stable VAE) vae_path = ( args.pretrained_teacher_model if args.pretrained_vae_model_name_or_path is None else args.pretrained_vae_model_name_or_path ) vae = AutoencoderKL.from_pretrained( vae_path, subfolder="vae" if args.pretrained_vae_model_name_or_path is None else None, revision=args.teacher_revision, ) # 6. Freeze teacher vae, text_encoders. vae.requires_grad_(False) text_encoder_one.requires_grad_(False) text_encoder_two.requires_grad_(False) # 7. Create online student U-Net. 
unet = UNet2DConditionModel.from_pretrained( args.pretrained_teacher_model, subfolder="unet", revision=args.teacher_revision ) unet.requires_grad_(False) # Check that all trainable models are in full precision low_precision_error_string = ( " Please make sure to always have all model weights in full float32 precision when starting training - even if" " doing mixed precision training, copy of the weights should still be float32." ) if accelerator.unwrap_model(unet).dtype != torch.float32: raise ValueError( f"Controlnet loaded as datatype {accelerator.unwrap_model(unet).dtype}. {low_precision_error_string}" ) # 8. Handle mixed precision and device placement # For mixed precision training we cast all non-trainable weigths to half-precision # as these weights are only used for inference, keeping weights in full precision is not required. weight_dtype = torch.float32 if accelerator.mixed_precision == "fp16": weight_dtype = torch.float16 elif accelerator.mixed_precision == "bf16": weight_dtype = torch.bfloat16 # Move unet, vae and text_encoder to device and cast to weight_dtype # The VAE is in float32 to avoid NaN losses. unet.to(accelerator.device, dtype=weight_dtype) if args.pretrained_vae_model_name_or_path is None: vae.to(accelerator.device, dtype=torch.float32) else: vae.to(accelerator.device, dtype=weight_dtype) text_encoder_one.to(accelerator.device, dtype=weight_dtype) text_encoder_two.to(accelerator.device, dtype=weight_dtype) # 9. Add LoRA to the student U-Net, only the LoRA projection matrix will be updated by the optimizer. if args.lora_target_modules is not None: lora_target_modules = [module_key.strip() for module_key in args.lora_target_modules.split(",")] else: lora_target_modules = [ "to_q", "to_k", "to_v", "to_out.0", "proj_in", "proj_out", "ff.net.0.proj", "ff.net.2", "conv1", "conv2", "conv_shortcut", "downsamplers.0.conv", "upsamplers.0.conv", "time_emb_proj", ] lora_config = LoraConfig( r=args.lora_rank, target_modules=lora_target_modules, lora_alpha=args.lora_alpha, lora_dropout=args.lora_dropout, ) unet.add_adapter(lora_config) # Also move the alpha and sigma noise schedules to accelerator.device. alpha_schedule = alpha_schedule.to(accelerator.device) sigma_schedule = sigma_schedule.to(accelerator.device) solver = solver.to(accelerator.device) # 10. Handle saving and loading of checkpoints # `accelerate` 0.16.0 will have better support for customized saving if version.parse(accelerate.__version__) >= version.parse("0.16.0"): # create custom saving & loading hooks so that `accelerator.save_state(...)` serializes in a nice format def save_model_hook(models, weights, output_dir): if accelerator.is_main_process: unet_ = accelerator.unwrap_model(unet) # also save the checkpoints in native `diffusers` format so that it can be easily # be independently loaded via `load_lora_weights()`. 
state_dict = convert_state_dict_to_diffusers(get_peft_model_state_dict(unet_)) StableDiffusionXLPipeline.save_lora_weights(output_dir, unet_lora_layers=state_dict) for _, model in enumerate(models): # make sure to pop weight so that corresponding model is not saved again weights.pop() def load_model_hook(models, input_dir): # load the LoRA into the model unet_ = accelerator.unwrap_model(unet) lora_state_dict, _ = StableDiffusionXLPipeline.lora_state_dict(input_dir) unet_state_dict = { f'{k.replace("unet.", "")}': v for k, v in lora_state_dict.items() if k.startswith("unet.") } unet_state_dict = convert_unet_state_dict_to_peft(unet_state_dict) incompatible_keys = set_peft_model_state_dict(unet_, unet_state_dict, adapter_name="default") if incompatible_keys is not None: # check only for unexpected keys unexpected_keys = getattr(incompatible_keys, "unexpected_keys", None) if unexpected_keys: logger.warning( f"Loading adapter weights from state_dict led to unexpected keys not found in the model: " f" {unexpected_keys}. " ) for _ in range(len(models)): # pop models so that they are not loaded again models.pop() # Make sure the trainable params are in float32. This is again needed since the base models # are in `weight_dtype`. More details: # https://github.com/huggingface/diffusers/pull/6514#discussion_r1449796804 if args.mixed_precision == "fp16": cast_training_params(unet_, dtype=torch.float32) accelerator.register_save_state_pre_hook(save_model_hook) accelerator.register_load_state_pre_hook(load_model_hook) # 11. Enable optimizations if args.enable_xformers_memory_efficient_attention: if is_xformers_available(): import xformers xformers_version = version.parse(xformers.__version__) if xformers_version == version.parse("0.0.16"): logger.warning( "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details." ) unet.enable_xformers_memory_efficient_attention() else: raise ValueError("xformers is not available. Make sure it is installed correctly") # Enable TF32 for faster training on Ampere GPUs, # cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices if args.allow_tf32: torch.backends.cuda.matmul.allow_tf32 = True if args.gradient_checkpointing: unet.enable_gradient_checkpointing() # Use 8-bit Adam for lower memory usage or to fine-tune the model in 16GB GPUs if args.use_8bit_adam: try: import bitsandbytes as bnb except ImportError: raise ImportError( "To use 8-bit Adam, please install the bitsandbytes library: `pip install bitsandbytes`." ) optimizer_class = bnb.optim.AdamW8bit else: optimizer_class = torch.optim.AdamW # 12. Optimizer creation params_to_optimize = filter(lambda p: p.requires_grad, unet.parameters()) optimizer = optimizer_class( params_to_optimize, lr=args.learning_rate, betas=(args.adam_beta1, args.adam_beta2), weight_decay=args.adam_weight_decay, eps=args.adam_epsilon, ) # 13. Dataset creation and data processing # In distributed training, the load_dataset function guarantees that only one local process can concurrently # download the dataset. if args.dataset_name is not None: # Downloading and loading a dataset from the hub. 
dataset = load_dataset( args.dataset_name, args.dataset_config_name, cache_dir=args.cache_dir, ) else: data_files = {} if args.train_data_dir is not None: data_files["train"] = os.path.join(args.train_data_dir, "**") dataset = load_dataset( "imagefolder", data_files=data_files, cache_dir=args.cache_dir, ) # See more about loading custom images at # https://huggingface.co/docs/datasets/v2.4.0/en/image_load#imagefolder # Preprocessing the datasets. column_names = dataset["train"].column_names # Get the column names for input/target. dataset_columns = DATASET_NAME_MAPPING.get(args.dataset_name, None) if args.image_column is None: image_column = dataset_columns[0] if dataset_columns is not None else column_names[0] else: image_column = args.image_column if image_column not in column_names: raise ValueError( f"--image_column' value '{args.image_column}' needs to be one of: {', '.join(column_names)}" ) if args.caption_column is None: caption_column = dataset_columns[1] if dataset_columns is not None else column_names[1] else: caption_column = args.caption_column if caption_column not in column_names: raise ValueError( f"--caption_column' value '{args.caption_column}' needs to be one of: {', '.join(column_names)}" ) # Preprocessing the datasets. interpolation_mode = resolve_interpolation_mode(args.interpolation_type) train_resize = transforms.Resize(args.resolution, interpolation=interpolation_mode) train_crop = transforms.CenterCrop(args.resolution) if args.center_crop else transforms.RandomCrop(args.resolution) train_flip = transforms.RandomHorizontalFlip(p=1.0) train_transforms = transforms.Compose([transforms.ToTensor(), transforms.Normalize([0.5], [0.5])]) def preprocess_train(examples): images = [image.convert("RGB") for image in examples[image_column]] # image aug original_sizes = [] all_images = [] crop_top_lefts = [] for image in images: original_sizes.append((image.height, image.width)) image = train_resize(image) if args.center_crop: y1 = max(0, int(round((image.height - args.resolution) / 2.0))) x1 = max(0, int(round((image.width - args.resolution) / 2.0))) image = train_crop(image) else: y1, x1, h, w = train_crop.get_params(image, (args.resolution, args.resolution)) image = crop(image, y1, x1, h, w) if args.random_flip and random.random() < 0.5: # flip x1 = image.width - x1 image = train_flip(image) crop_top_left = (y1, x1) crop_top_lefts.append(crop_top_left) image = train_transforms(image) all_images.append(image) examples["original_sizes"] = original_sizes examples["crop_top_lefts"] = crop_top_lefts examples["pixel_values"] = all_images examples["captions"] = list(examples[caption_column]) return examples with accelerator.main_process_first(): if args.max_train_samples is not None: dataset["train"] = dataset["train"].shuffle(seed=args.seed).select(range(args.max_train_samples)) # Set the training transforms train_dataset = dataset["train"].with_transform(preprocess_train) def collate_fn(examples): pixel_values = torch.stack([example["pixel_values"] for example in examples]) pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float() original_sizes = [example["original_sizes"] for example in examples] crop_top_lefts = [example["crop_top_lefts"] for example in examples] captions = [example["captions"] for example in examples] return { "pixel_values": pixel_values, "captions": captions, "original_sizes": original_sizes, "crop_top_lefts": crop_top_lefts, } # DataLoaders creation: train_dataloader = torch.utils.data.DataLoader( train_dataset, shuffle=True, 
collate_fn=collate_fn, batch_size=args.train_batch_size, num_workers=args.dataloader_num_workers, ) # 14. Embeddings for the UNet. # Adapted from pipeline.StableDiffusionXLPipeline._get_add_time_ids def compute_embeddings(prompt_batch, original_sizes, crop_coords, text_encoders, tokenizers, is_train=True): def compute_time_ids(original_size, crops_coords_top_left): target_size = (args.resolution, args.resolution) add_time_ids = list(original_size + crops_coords_top_left + target_size) add_time_ids = torch.tensor([add_time_ids]) add_time_ids = add_time_ids.to(accelerator.device, dtype=weight_dtype) return add_time_ids prompt_embeds, pooled_prompt_embeds = encode_prompt(prompt_batch, text_encoders, tokenizers, is_train) add_text_embeds = pooled_prompt_embeds add_time_ids = torch.cat([compute_time_ids(s, c) for s, c in zip(original_sizes, crop_coords)]) prompt_embeds = prompt_embeds.to(accelerator.device) add_text_embeds = add_text_embeds.to(accelerator.device) unet_added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids} return {"prompt_embeds": prompt_embeds, **unet_added_cond_kwargs} text_encoders = [text_encoder_one, text_encoder_two] tokenizers = [tokenizer_one, tokenizer_two] compute_embeddings_fn = functools.partial(compute_embeddings, text_encoders=text_encoders, tokenizers=tokenizers) # 15. LR Scheduler creation # Scheduler and math around the number of training steps. # Check the PR https://github.com/huggingface/diffusers/pull/8312 for detailed explanation. num_warmup_steps_for_scheduler = args.lr_warmup_steps * accelerator.num_processes if args.max_train_steps is None: len_train_dataloader_after_sharding = math.ceil(len(train_dataloader) / accelerator.num_processes) num_update_steps_per_epoch = math.ceil(len_train_dataloader_after_sharding / args.gradient_accumulation_steps) num_training_steps_for_scheduler = ( args.num_train_epochs * num_update_steps_per_epoch * accelerator.num_processes ) else: num_training_steps_for_scheduler = args.max_train_steps * accelerator.num_processes if args.scale_lr: args.learning_rate = ( args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes ) # Make sure the trainable params are in float32. if args.mixed_precision == "fp16": # only upcast trainable parameters (LoRA) into fp32 cast_training_params(unet, dtype=torch.float32) lr_scheduler = get_scheduler( args.lr_scheduler, optimizer=optimizer, num_warmup_steps=num_warmup_steps_for_scheduler, num_training_steps=num_training_steps_for_scheduler, ) # 16. Prepare for training # Prepare everything with our `accelerator`. unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare( unet, optimizer, train_dataloader, lr_scheduler ) # We need to recalculate our total training steps as the size of the training dataloader may have changed. num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if args.max_train_steps is None: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch if num_training_steps_for_scheduler != args.max_train_steps * accelerator.num_processes: logger.warning( f"The length of the 'train_dataloader' after 'accelerator.prepare' ({len(train_dataloader)}) does not match " f"the expected length ({len_train_dataloader_after_sharding}) when the learning rate scheduler was created. " f"This inconsistency may result in the learning rate scheduler not functioning properly." 
) # Afterwards we recalculate our number of training epochs args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) # We need to initialize the trackers we use, and also store our configuration. # The trackers initializes automatically on the main process. if accelerator.is_main_process: tracker_config = dict(vars(args)) accelerator.init_trackers(args.tracker_project_name, config=tracker_config) # 17. Train! total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps logger.info("***** Running training *****") logger.info(f" Num examples = {len(train_dataset)}") logger.info(f" Num Epochs = {args.num_train_epochs}") logger.info(f" Instantaneous batch size per device = {args.train_batch_size}") logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}") logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") logger.info(f" Total optimization steps = {args.max_train_steps}") global_step = 0 first_epoch = 0 # Potentially load in the weights and states from a previous save if args.resume_from_checkpoint: if args.resume_from_checkpoint != "latest": path = os.path.basename(args.resume_from_checkpoint) else: # Get the most recent checkpoint dirs = os.listdir(args.output_dir) dirs = [d for d in dirs if d.startswith("checkpoint")] dirs = sorted(dirs, key=lambda x: int(x.split("-")[1])) path = dirs[-1] if len(dirs) > 0 else None if path is None: accelerator.print( f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run." ) args.resume_from_checkpoint = None initial_global_step = 0 else: accelerator.print(f"Resuming from checkpoint {path}") accelerator.load_state(os.path.join(args.output_dir, path)) global_step = int(path.split("-")[1]) initial_global_step = global_step first_epoch = global_step // num_update_steps_per_epoch else: initial_global_step = 0 progress_bar = tqdm( range(0, args.max_train_steps), initial=initial_global_step, desc="Steps", # Only show the progress bar once on each machine. disable=not accelerator.is_local_main_process, ) unet.train() for epoch in range(first_epoch, args.num_train_epochs): for step, batch in enumerate(train_dataloader): with accelerator.accumulate(unet): # 1. Load and process the image and text conditioning pixel_values, text, orig_size, crop_coords = ( batch["pixel_values"], batch["captions"], batch["original_sizes"], batch["crop_top_lefts"], ) encoded_text = compute_embeddings_fn(text, orig_size, crop_coords) # encode pixel values with batch size of at most args.vae_encode_batch_size pixel_values = pixel_values.to(dtype=vae.dtype) latents = [] for i in range(0, pixel_values.shape[0], args.vae_encode_batch_size): latents.append(vae.encode(pixel_values[i : i + args.vae_encode_batch_size]).latent_dist.sample()) latents = torch.cat(latents, dim=0) latents = latents * vae.config.scaling_factor if args.pretrained_vae_model_name_or_path is None: latents = latents.to(weight_dtype) # 2. Sample a random timestep for each image t_n from the ODE solver timesteps without bias. # For the DDIM solver, the timestep schedule is [T - 1, T - k - 1, T - 2 * k - 1, ...] 
bsz = latents.shape[0] topk = noise_scheduler.config.num_train_timesteps // args.num_ddim_timesteps index = torch.randint(0, args.num_ddim_timesteps, (bsz,), device=latents.device).long() start_timesteps = solver.ddim_timesteps[index] timesteps = start_timesteps - topk timesteps = torch.where(timesteps < 0, torch.zeros_like(timesteps), timesteps) # 3. Get boundary scalings for start_timesteps and (end) timesteps. c_skip_start, c_out_start = scalings_for_boundary_conditions( start_timesteps, timestep_scaling=args.timestep_scaling_factor ) c_skip_start, c_out_start = [append_dims(x, latents.ndim) for x in [c_skip_start, c_out_start]] c_skip, c_out = scalings_for_boundary_conditions( timesteps, timestep_scaling=args.timestep_scaling_factor ) c_skip, c_out = [append_dims(x, latents.ndim) for x in [c_skip, c_out]] # 4. Sample noise from the prior and add it to the latents according to the noise magnitude at each # timestep (this is the forward diffusion process) [z_{t_{n + k}} in Algorithm 1] noise = torch.randn_like(latents) noisy_model_input = noise_scheduler.add_noise(latents, noise, start_timesteps) # 5. Sample a random guidance scale w from U[w_min, w_max] # Note that for LCM-LoRA distillation it is not necessary to use a guidance scale embedding w = (args.w_max - args.w_min) * torch.rand((bsz,)) + args.w_min w = w.reshape(bsz, 1, 1, 1) w = w.to(device=latents.device, dtype=latents.dtype) # 6. Prepare prompt embeds and unet_added_conditions prompt_embeds = encoded_text.pop("prompt_embeds") # 7. Get online LCM prediction on z_{t_{n + k}} (noisy_model_input), w, c, t_{n + k} (start_timesteps) noise_pred = unet( noisy_model_input, start_timesteps, encoder_hidden_states=prompt_embeds, added_cond_kwargs=encoded_text, ).sample pred_x_0 = get_predicted_original_sample( noise_pred, start_timesteps, noisy_model_input, noise_scheduler.config.prediction_type, alpha_schedule, sigma_schedule, ) model_pred = c_skip_start * noisy_model_input + c_out_start * pred_x_0 # 8. Compute the conditional and unconditional teacher model predictions to get CFG estimates of the # predicted noise eps_0 and predicted original sample x_0, then run the ODE solver using these # estimates to predict the data point in the augmented PF-ODE trajectory corresponding to the next ODE # solver timestep. # With the adapters disabled, the `unet` is the regular teacher model. accelerator.unwrap_model(unet).disable_adapters() with torch.no_grad(): # 1. Get teacher model prediction on noisy_model_input z_{t_{n + k}} and conditional embedding c cond_teacher_output = unet( noisy_model_input, start_timesteps, encoder_hidden_states=prompt_embeds, added_cond_kwargs={k: v.to(weight_dtype) for k, v in encoded_text.items()}, ).sample cond_pred_x0 = get_predicted_original_sample( cond_teacher_output, start_timesteps, noisy_model_input, noise_scheduler.config.prediction_type, alpha_schedule, sigma_schedule, ) cond_pred_noise = get_predicted_noise( cond_teacher_output, start_timesteps, noisy_model_input, noise_scheduler.config.prediction_type, alpha_schedule, sigma_schedule, ) # 2. 
Get teacher model prediction on noisy_model_input z_{t_{n + k}} and unconditional embedding 0 uncond_prompt_embeds = torch.zeros_like(prompt_embeds) uncond_pooled_prompt_embeds = torch.zeros_like(encoded_text["text_embeds"]) uncond_added_conditions = copy.deepcopy(encoded_text) uncond_added_conditions["text_embeds"] = uncond_pooled_prompt_embeds uncond_teacher_output = unet( noisy_model_input, start_timesteps, encoder_hidden_states=uncond_prompt_embeds.to(weight_dtype), added_cond_kwargs={k: v.to(weight_dtype) for k, v in uncond_added_conditions.items()}, ).sample uncond_pred_x0 = get_predicted_original_sample( uncond_teacher_output, start_timesteps, noisy_model_input, noise_scheduler.config.prediction_type, alpha_schedule, sigma_schedule, ) uncond_pred_noise = get_predicted_noise( uncond_teacher_output, start_timesteps, noisy_model_input, noise_scheduler.config.prediction_type, alpha_schedule, sigma_schedule, ) # 3. Calculate the CFG estimate of x_0 (pred_x0) and eps_0 (pred_noise) # Note that this uses the LCM paper's CFG formulation rather than the Imagen CFG formulation pred_x0 = cond_pred_x0 + w * (cond_pred_x0 - uncond_pred_x0) pred_noise = cond_pred_noise + w * (cond_pred_noise - uncond_pred_noise) # 4. Run one step of the ODE solver to estimate the next point x_prev on the # augmented PF-ODE trajectory (solving backward in time) # Note that the DDIM step depends on both the predicted x_0 and source noise eps_0. x_prev = solver.ddim_step(pred_x0, pred_noise, index).to(unet.dtype) # re-enable unet adapters to turn the `unet` into a student unet. accelerator.unwrap_model(unet).enable_adapters() # 9. Get target LCM prediction on x_prev, w, c, t_n (timesteps) # Note that we do not use a separate target network for LCM-LoRA distillation. with torch.no_grad(): target_noise_pred = unet( x_prev, timesteps, encoder_hidden_states=prompt_embeds, added_cond_kwargs={k: v.to(weight_dtype) for k, v in encoded_text.items()}, ).sample pred_x_0 = get_predicted_original_sample( target_noise_pred, timesteps, x_prev, noise_scheduler.config.prediction_type, alpha_schedule, sigma_schedule, ) target = c_skip * x_prev + c_out * pred_x_0 # 10. Calculate loss if args.loss_type == "l2": loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean") elif args.loss_type == "huber": loss = torch.mean( torch.sqrt((model_pred.float() - target.float()) ** 2 + args.huber_c**2) - args.huber_c ) # 11. 
Backpropagate on the online student model (`unet`) (only LoRA) accelerator.backward(loss) if accelerator.sync_gradients: accelerator.clip_grad_norm_(params_to_optimize, args.max_grad_norm) optimizer.step() lr_scheduler.step() optimizer.zero_grad(set_to_none=True) # Checks if the accelerator has performed an optimization step behind the scenes if accelerator.sync_gradients: progress_bar.update(1) global_step += 1 if accelerator.is_main_process: if global_step % args.checkpointing_steps == 0: # _before_ saving state, check if this save would set us over the `checkpoints_total_limit` if args.checkpoints_total_limit is not None: checkpoints = os.listdir(args.output_dir) checkpoints = [d for d in checkpoints if d.startswith("checkpoint")] checkpoints = sorted(checkpoints, key=lambda x: int(x.split("-")[1])) # before we save the new checkpoint, we need to have at _most_ `checkpoints_total_limit - 1` checkpoints if len(checkpoints) >= args.checkpoints_total_limit: num_to_remove = len(checkpoints) - args.checkpoints_total_limit + 1 removing_checkpoints = checkpoints[0:num_to_remove] logger.info( f"{len(checkpoints)} checkpoints already exist, removing {len(removing_checkpoints)} checkpoints" ) logger.info(f"removing checkpoints: {', '.join(removing_checkpoints)}") for removing_checkpoint in removing_checkpoints: removing_checkpoint = os.path.join(args.output_dir, removing_checkpoint) shutil.rmtree(removing_checkpoint) save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}") accelerator.save_state(save_path) logger.info(f"Saved state to {save_path}") if global_step % args.validation_steps == 0: log_validation( vae, args, accelerator, weight_dtype, global_step, unet=unet, is_final_validation=False ) logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]} progress_bar.set_postfix(**logs) accelerator.log(logs, step=global_step) if global_step >= args.max_train_steps: break # Create the pipeline using using the trained modules and save it. accelerator.wait_for_everyone() if accelerator.is_main_process: unet = accelerator.unwrap_model(unet) unet_lora_state_dict = convert_state_dict_to_diffusers(get_peft_model_state_dict(unet)) StableDiffusionXLPipeline.save_lora_weights(args.output_dir, unet_lora_layers=unet_lora_state_dict) if args.push_to_hub: upload_folder( repo_id=repo_id, folder_path=args.output_dir, commit_message="End of training", ignore_patterns=["step_*", "epoch_*"], ) del unet torch.cuda.empty_cache() # Final inference. if args.validation_steps is not None: log_validation(vae, args, accelerator, weight_dtype, step=global_step, unet=None, is_final_validation=True) accelerator.end_training() if __name__ == "__main__": args = parse_args() main(args)
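# ---------------------------------------------------------------------------
# Inference sketch (illustrative only; not executed by this training script).
# Assuming the distilled LCM-LoRA weights were saved to `args.output_dir`, they
# can be loaded on top of the SDXL base model and sampled in very few steps
# with `LCMScheduler`, roughly like this:
#
#   import torch
#   from diffusers import StableDiffusionXLPipeline, LCMScheduler
#
#   pipe = StableDiffusionXLPipeline.from_pretrained(
#       "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
#   ).to("cuda")
#   pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
#   pipe.load_lora_weights("path/to/output_dir")  # hypothetical path to the trained LoRA
#   image = pipe(
#       "a photo of a cat", num_inference_steps=4, guidance_scale=1.0
#   ).images[0]
# ---------------------------------------------------------------------------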
diffusers/examples/consistency_distillation/train_lcm_distill_lora_sdxl.py/0
{ "file_path": "diffusers/examples/consistency_distillation/train_lcm_distill_lora_sdxl.py", "repo_id": "diffusers", "token_count": 27822 }
122
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import os import sys import tempfile sys.path.append("..") from test_examples_utils import ExamplesTestsAccelerate, run_command # noqa: E402 logging.basicConfig(level=logging.DEBUG) logger = logging.getLogger() stream_handler = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class CustomDiffusion(ExamplesTestsAccelerate): def test_custom_diffusion(self): with tempfile.TemporaryDirectory() as tmpdir: test_args = f""" examples/custom_diffusion/train_custom_diffusion.py --pretrained_model_name_or_path hf-internal-testing/tiny-stable-diffusion-pipe --instance_data_dir docs/source/en/imgs --instance_prompt <new1> --resolution 64 --train_batch_size 1 --gradient_accumulation_steps 1 --max_train_steps 2 --learning_rate 1.0e-05 --scale_lr --lr_scheduler constant --lr_warmup_steps 0 --modifier_token <new1> --no_safe_serialization --output_dir {tmpdir} """.split() run_command(self._launch_args + test_args) # save_pretrained smoke test self.assertTrue(os.path.isfile(os.path.join(tmpdir, "pytorch_custom_diffusion_weights.bin"))) self.assertTrue(os.path.isfile(os.path.join(tmpdir, "<new1>.bin"))) def test_custom_diffusion_checkpointing_checkpoints_total_limit(self): with tempfile.TemporaryDirectory() as tmpdir: test_args = f""" examples/custom_diffusion/train_custom_diffusion.py --pretrained_model_name_or_path=hf-internal-testing/tiny-stable-diffusion-pipe --instance_data_dir=docs/source/en/imgs --output_dir={tmpdir} --instance_prompt=<new1> --resolution=64 --train_batch_size=1 --modifier_token=<new1> --dataloader_num_workers=0 --max_train_steps=6 --checkpoints_total_limit=2 --checkpointing_steps=2 --no_safe_serialization """.split() run_command(self._launch_args + test_args) self.assertEqual({x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-4", "checkpoint-6"}) def test_custom_diffusion_checkpointing_checkpoints_total_limit_removes_multiple_checkpoints(self): with tempfile.TemporaryDirectory() as tmpdir: test_args = f""" examples/custom_diffusion/train_custom_diffusion.py --pretrained_model_name_or_path=hf-internal-testing/tiny-stable-diffusion-pipe --instance_data_dir=docs/source/en/imgs --output_dir={tmpdir} --instance_prompt=<new1> --resolution=64 --train_batch_size=1 --modifier_token=<new1> --dataloader_num_workers=0 --max_train_steps=4 --checkpointing_steps=2 --no_safe_serialization """.split() run_command(self._launch_args + test_args) self.assertEqual( {x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-2", "checkpoint-4"}, ) resume_run_args = f""" examples/custom_diffusion/train_custom_diffusion.py --pretrained_model_name_or_path=hf-internal-testing/tiny-stable-diffusion-pipe --instance_data_dir=docs/source/en/imgs --output_dir={tmpdir} --instance_prompt=<new1> --resolution=64 --train_batch_size=1 --modifier_token=<new1> --dataloader_num_workers=0 --max_train_steps=8 --checkpointing_steps=2 --resume_from_checkpoint=checkpoint-4 
--checkpoints_total_limit=2 --no_safe_serialization """.split() run_command(self._launch_args + resume_run_args) self.assertEqual({x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-6", "checkpoint-8"})
diffusers/examples/custom_diffusion/test_custom_diffusion.py/0
{ "file_path": "diffusers/examples/custom_diffusion/test_custom_diffusion.py", "repo_id": "diffusers", "token_count": 2234 }
123
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import os import sys import tempfile sys.path.append("..") from test_examples_utils import ExamplesTestsAccelerate, run_command # noqa: E402 logging.basicConfig(level=logging.DEBUG) logger = logging.getLogger() stream_handler = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class InstructPix2Pix(ExamplesTestsAccelerate): def test_instruct_pix2pix_checkpointing_checkpoints_total_limit(self): with tempfile.TemporaryDirectory() as tmpdir: test_args = f""" examples/instruct_pix2pix/train_instruct_pix2pix.py --pretrained_model_name_or_path=hf-internal-testing/tiny-stable-diffusion-pipe --dataset_name=hf-internal-testing/instructpix2pix-10-samples --resolution=64 --random_flip --train_batch_size=1 --max_train_steps=6 --checkpointing_steps=2 --checkpoints_total_limit=2 --output_dir {tmpdir} --seed=0 """.split() run_command(self._launch_args + test_args) self.assertEqual( {x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-4", "checkpoint-6"}, ) def test_instruct_pix2pix_checkpointing_checkpoints_total_limit_removes_multiple_checkpoints(self): with tempfile.TemporaryDirectory() as tmpdir: test_args = f""" examples/instruct_pix2pix/train_instruct_pix2pix.py --pretrained_model_name_or_path=hf-internal-testing/tiny-stable-diffusion-pipe --dataset_name=hf-internal-testing/instructpix2pix-10-samples --resolution=64 --random_flip --train_batch_size=1 --max_train_steps=4 --checkpointing_steps=2 --output_dir {tmpdir} --seed=0 """.split() run_command(self._launch_args + test_args) # check checkpoint directories exist self.assertEqual( {x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-2", "checkpoint-4"}, ) resume_run_args = f""" examples/instruct_pix2pix/train_instruct_pix2pix.py --pretrained_model_name_or_path=hf-internal-testing/tiny-stable-diffusion-pipe --dataset_name=hf-internal-testing/instructpix2pix-10-samples --resolution=64 --random_flip --train_batch_size=1 --max_train_steps=8 --checkpointing_steps=2 --output_dir {tmpdir} --seed=0 --resume_from_checkpoint=checkpoint-4 --checkpoints_total_limit=2 """.split() run_command(self._launch_args + resume_run_args) # check checkpoint directories exist self.assertEqual( {x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-6", "checkpoint-8"}, )
diffusers/examples/instruct_pix2pix/test_instruct_pix2pix.py/0
{ "file_path": "diffusers/examples/instruct_pix2pix/test_instruct_pix2pix.py", "repo_id": "diffusers", "token_count": 1823 }
124
# Consistency Training

`train_cm_ct_unconditional.py` trains a consistency model (CM) from scratch following the consistency training (CT) algorithm introduced in [Consistency Models](https://arxiv.org/abs/2303.01469) and refined in [Improved Techniques for Training Consistency Models](https://arxiv.org/abs/2310.14189). Both unconditional and class-conditional training are supported.

A usage example is as follows:

```bash
accelerate launch examples/research_projects/consistency_training/train_cm_ct_unconditional.py \
    --dataset_name="cifar10" \
    --dataset_image_column_name="img" \
    --output_dir="/path/to/output/dir" \
    --mixed_precision=fp16 \
    --resolution=32 \
    --max_train_steps=1000 --max_train_samples=10000 \
    --dataloader_num_workers=8 \
    --noise_precond_type="cm" --input_precond_type="cm" \
    --train_batch_size=4 \
    --learning_rate=1e-04 --lr_scheduler="constant" --lr_warmup_steps=0 \
    --use_8bit_adam \
    --use_ema \
    --validation_steps=100 --eval_batch_size=4 \
    --checkpointing_steps=100 --checkpoints_total_limit=10 \
    --class_conditional --num_classes=10
```
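After training finishes, you will typically want to sample from the distilled model. The snippet below is only an illustrative sketch: it assumes the checkpoint written to `--output_dir` can be loaded with diffusers' `ConsistencyModelPipeline` (the exact export format depends on how the trained UNet and scheduler are saved).

```python
# Minimal sampling sketch, assuming a ConsistencyModelPipeline-compatible checkpoint.
import torch
from diffusers import ConsistencyModelPipeline

pipe = ConsistencyModelPipeline.from_pretrained("/path/to/output/dir", torch_dtype=torch.float16)
pipe.to("cuda")

# Consistency models support single-step generation; pass `class_labels`
# only if the model was trained with --class_conditional.
image = pipe(batch_size=1, num_inference_steps=1, class_labels=[0]).images[0]
image.save("ct_sample.png")
```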
diffusers/examples/research_projects/consistency_training/README.md/0
{ "file_path": "diffusers/examples/research_projects/consistency_training/README.md", "repo_id": "diffusers", "token_count": 413 }
125
# GeoDiff > [!TIP] > This notebook is not actively maintained by the Diffusers team. For any questions or comments, please contact [natolambert](https://twitter.com/natolambert). This is an experimental research notebook demonstrating how to generate stable 3D structures of molecules with [GeoDiff](https://github.com/MinkaiXu/GeoDiff) and Diffusers.
diffusers/examples/research_projects/geodiff/README.md/0
{ "file_path": "diffusers/examples/research_projects/geodiff/README.md", "repo_id": "diffusers", "token_count": 95 }
126
# Distillation for quantization on Textual Inversion models to personalize text2image

[Textual inversion](https://arxiv.org/abs/2208.01618) is a method to personalize text2image models like Stable Diffusion on your own images. _By using just 3-5 images, new concepts can be taught to Stable Diffusion and the model personalized on your own images._ The `textual_inversion.py` script shows how to implement the training procedure and adapt it for Stable Diffusion.

We have enabled distillation for quantization in `textual_inversion.py` to do quantization aware training as well as distillation on the model generated by the Textual Inversion method.

## Installing the dependencies

Before running the scripts, make sure to install the library's training dependencies:

```bash
pip install -r requirements.txt
```

## Prepare Datasets

One picture from the Hugging Face dataset [sd-concepts-library/dicoo2](https://huggingface.co/sd-concepts-library/dicoo2) is needed; save it to the `./dicoo` directory. The picture is shown below:

<a href="https://huggingface.co/sd-concepts-library/dicoo2/blob/main/concept_images/1.jpeg">
    <img src="https://huggingface.co/sd-concepts-library/dicoo2/resolve/main/concept_images/1.jpeg" width="300" height="300">
</a>

## Get a FP32 Textual Inversion model

Use the following command to fine-tune the Stable Diffusion model on the above dataset to obtain the FP32 Textual Inversion model.

```bash
export MODEL_NAME="CompVis/stable-diffusion-v1-4"
export DATA_DIR="./dicoo"

accelerate launch textual_inversion.py \
  --pretrained_model_name_or_path=$MODEL_NAME \
  --train_data_dir=$DATA_DIR \
  --learnable_property="object" \
  --placeholder_token="<dicoo>" --initializer_token="toy" \
  --resolution=512 \
  --train_batch_size=1 \
  --gradient_accumulation_steps=4 \
  --max_train_steps=3000 \
  --learning_rate=5.0e-04 --scale_lr \
  --lr_scheduler="constant" \
  --lr_warmup_steps=0 \
  --output_dir="dicoo_model"
```

## Do distillation for quantization

Distillation for quantization is a method that combines [intermediate layer knowledge distillation](https://github.com/intel/neural-compressor/blob/master/docs/source/distillation.md#intermediate-layer-knowledge-distillation) and [quantization aware training](https://github.com/intel/neural-compressor/blob/master/docs/source/quantization.md#quantization-aware-training) in the same training process to improve the performance of the quantized model. Given a FP32 model, the distillation for quantization approach takes this model itself as the teacher model and transfers the knowledge of the specified layers to the student model, i.e. the quantized version of the FP32 model, during the quantization aware training process.

Once you have the FP32 Textual Inversion model, the following command will take it as input to do distillation for quantization and generate the INT8 Textual Inversion model.
```bash export FP32_MODEL_NAME="./dicoo_model" export DATA_DIR="./dicoo" accelerate launch textual_inversion.py \ --pretrained_model_name_or_path=$FP32_MODEL_NAME \ --train_data_dir=$DATA_DIR \ --use_ema --learnable_property="object" \ --placeholder_token="<dicoo>" --initializer_token="toy" \ --resolution=512 \ --train_batch_size=1 \ --gradient_accumulation_steps=4 \ --max_train_steps=300 \ --learning_rate=5.0e-04 --max_grad_norm=3 \ --lr_scheduler="constant" \ --lr_warmup_steps=0 \ --output_dir="int8_model" \ --do_quantization --do_distillation --verify_loading ``` After the distillation for quantization process, the quantized UNet would be 4 times smaller (3279MB -> 827MB). ## Inference Once you have trained a INT8 model with the above command, the inference can be done simply using the `text2images.py` script. Make sure to include the `placeholder_token` in your prompt. ```bash export INT8_MODEL_NAME="./int8_model" python text2images.py \ --pretrained_model_name_or_path=$INT8_MODEL_NAME \ --caption "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings." \ --images_num 4 ``` Here is the comparison of images generated by the FP32 model (left) and INT8 model (right) respectively: <p float="left"> <img src="https://huggingface.co/datasets/Intel/textual_inversion_dicoo_dfq/resolve/main/FP32.png" width = "300" height = "300" alt="FP32" align=center /> <img src="https://huggingface.co/datasets/Intel/textual_inversion_dicoo_dfq/resolve/main/INT8.png" width = "300" height = "300" alt="INT8" align=center /> </p>
diffusers/examples/research_projects/intel_opts/textual_inversion_dfq/README.md/0
{ "file_path": "diffusers/examples/research_projects/intel_opts/textual_inversion_dfq/README.md", "repo_id": "diffusers", "token_count": 1443 }
127
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Any, Dict, Optional, Tuple, Union import torch from diffusers.configuration_utils import register_to_config from diffusers.models.controlnet import ( ControlNetConditioningEmbedding, ControlNetModel, ControlNetOutput, ) from diffusers.utils import logging logger = logging.get_logger(__name__) # pylint: disable=invalid-name class PromptDiffusionControlNetModel(ControlNetModel): """ A PromptDiffusionControlNet model. Args: in_channels (`int`, defaults to 4): The number of channels in the input sample. flip_sin_to_cos (`bool`, defaults to `True`): Whether to flip the sin to cos in the time embedding. freq_shift (`int`, defaults to 0): The frequency shift to apply to the time embedding. down_block_types (`tuple[str]`, defaults to `("CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D")`): The tuple of downsample blocks to use. only_cross_attention (`Union[bool, Tuple[bool]]`, defaults to `False`): block_out_channels (`tuple[int]`, defaults to `(320, 640, 1280, 1280)`): The tuple of output channels for each block. layers_per_block (`int`, defaults to 2): The number of layers per block. downsample_padding (`int`, defaults to 1): The padding to use for the downsampling convolution. mid_block_scale_factor (`float`, defaults to 1): The scale factor to use for the mid block. act_fn (`str`, defaults to "silu"): The activation function to use. norm_num_groups (`int`, *optional*, defaults to 32): The number of groups to use for the normalization. If None, normalization and activation layers is skipped in post-processing. norm_eps (`float`, defaults to 1e-5): The epsilon to use for the normalization. cross_attention_dim (`int`, defaults to 1280): The dimension of the cross attention features. transformer_layers_per_block (`int` or `Tuple[int]`, *optional*, defaults to 1): The number of transformer blocks of type [`~models.attention.BasicTransformerBlock`]. Only relevant for [`~models.unet_2d_blocks.CrossAttnDownBlock2D`], [`~models.unet_2d_blocks.CrossAttnUpBlock2D`], [`~models.unet_2d_blocks.UNetMidBlock2DCrossAttn`]. encoder_hid_dim (`int`, *optional*, defaults to None): If `encoder_hid_dim_type` is defined, `encoder_hidden_states` will be projected from `encoder_hid_dim` dimension to `cross_attention_dim`. encoder_hid_dim_type (`str`, *optional*, defaults to `None`): If given, the `encoder_hidden_states` and potentially other embeddings are down-projected to text embeddings of dimension `cross_attention` according to `encoder_hid_dim_type`. attention_head_dim (`Union[int, Tuple[int]]`, defaults to 8): The dimension of the attention heads. use_linear_projection (`bool`, defaults to `False`): class_embed_type (`str`, *optional*, defaults to `None`): The type of class embedding to use which is ultimately summed with the time embeddings. Choose from None, `"timestep"`, `"identity"`, `"projection"`, or `"simple_projection"`. 
addition_embed_type (`str`, *optional*, defaults to `None`): Configures an optional embedding which will be summed with the time embeddings. Choose from `None` or "text". "text" will use the `TextTimeEmbedding` layer. num_class_embeds (`int`, *optional*, defaults to 0): Input dimension of the learnable embedding matrix to be projected to `time_embed_dim`, when performing class conditioning with `class_embed_type` equal to `None`. upcast_attention (`bool`, defaults to `False`): resnet_time_scale_shift (`str`, defaults to `"default"`): Time scale shift config for ResNet blocks (see `ResnetBlock2D`). Choose from `default` or `scale_shift`. projection_class_embeddings_input_dim (`int`, *optional*, defaults to `None`): The dimension of the `class_labels` input when `class_embed_type="projection"`. Required when `class_embed_type="projection"`. controlnet_conditioning_channel_order (`str`, defaults to `"rgb"`): The channel order of conditional image. Will convert to `rgb` if it's `bgr`. conditioning_embedding_out_channels (`tuple[int]`, *optional*, defaults to `(16, 32, 96, 256)`): The tuple of output channel for each block in the `conditioning_embedding` layer. global_pool_conditions (`bool`, defaults to `False`): TODO(Patrick) - unused parameter. addition_embed_type_num_heads (`int`, defaults to 64): The number of heads to use for the `TextTimeEmbedding` layer. """ _supports_gradient_checkpointing = True @register_to_config def __init__( self, in_channels: int = 4, conditioning_channels: int = 3, flip_sin_to_cos: bool = True, freq_shift: int = 0, down_block_types: Tuple[str, ...] = ( "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D", ), mid_block_type: Optional[str] = "UNetMidBlock2DCrossAttn", only_cross_attention: Union[bool, Tuple[bool]] = False, block_out_channels: Tuple[int, ...] 
= (320, 640, 1280, 1280), layers_per_block: int = 2, downsample_padding: int = 1, mid_block_scale_factor: float = 1, act_fn: str = "silu", norm_num_groups: Optional[int] = 32, norm_eps: float = 1e-5, cross_attention_dim: int = 1280, transformer_layers_per_block: Union[int, Tuple[int, ...]] = 1, encoder_hid_dim: Optional[int] = None, encoder_hid_dim_type: Optional[str] = None, attention_head_dim: Union[int, Tuple[int, ...]] = 8, num_attention_heads: Optional[Union[int, Tuple[int, ...]]] = None, use_linear_projection: bool = False, class_embed_type: Optional[str] = None, addition_embed_type: Optional[str] = None, addition_time_embed_dim: Optional[int] = None, num_class_embeds: Optional[int] = None, upcast_attention: bool = False, resnet_time_scale_shift: str = "default", projection_class_embeddings_input_dim: Optional[int] = None, controlnet_conditioning_channel_order: str = "rgb", conditioning_embedding_out_channels: Optional[Tuple[int, ...]] = (16, 32, 96, 256), global_pool_conditions: bool = False, addition_embed_type_num_heads: int = 64, ): super().__init__( in_channels, conditioning_channels, flip_sin_to_cos, freq_shift, down_block_types, mid_block_type, only_cross_attention, block_out_channels, layers_per_block, downsample_padding, mid_block_scale_factor, act_fn, norm_num_groups, norm_eps, cross_attention_dim, transformer_layers_per_block, encoder_hid_dim, encoder_hid_dim_type, attention_head_dim, num_attention_heads, use_linear_projection, class_embed_type, addition_embed_type, addition_time_embed_dim, num_class_embeds, upcast_attention, resnet_time_scale_shift, projection_class_embeddings_input_dim, controlnet_conditioning_channel_order, conditioning_embedding_out_channels, global_pool_conditions, addition_embed_type_num_heads, ) self.controlnet_query_cond_embedding = ControlNetConditioningEmbedding( conditioning_embedding_channels=block_out_channels[0], block_out_channels=conditioning_embedding_out_channels, conditioning_channels=3, ) def forward( self, sample: torch.Tensor, timestep: Union[torch.Tensor, float, int], encoder_hidden_states: torch.Tensor, controlnet_cond: torch.Tensor, controlnet_query_cond: torch.Tensor, conditioning_scale: float = 1.0, class_labels: Optional[torch.Tensor] = None, timestep_cond: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, added_cond_kwargs: Optional[Dict[str, torch.Tensor]] = None, cross_attention_kwargs: Optional[Dict[str, Any]] = None, guess_mode: bool = False, return_dict: bool = True, ) -> Union[ControlNetOutput, Tuple[Tuple[torch.Tensor, ...], torch.Tensor]]: """ The [`~PromptDiffusionControlNetModel`] forward method. Args: sample (`torch.Tensor`): The noisy input tensor. timestep (`Union[torch.Tensor, float, int]`): The number of timesteps to denoise an input. encoder_hidden_states (`torch.Tensor`): The encoder hidden states. controlnet_cond (`torch.Tensor`): The conditional input tensor of shape `(batch_size, sequence_length, hidden_size)`. controlnet_query_cond (`torch.Tensor`): The conditional input tensor of shape `(batch_size, sequence_length, hidden_size)`. conditioning_scale (`float`, defaults to `1.0`): The scale factor for ControlNet outputs. class_labels (`torch.Tensor`, *optional*, defaults to `None`): Optional class labels for conditioning. Their embeddings will be summed with the timestep embeddings. timestep_cond (`torch.Tensor`, *optional*, defaults to `None`): Additional conditional embeddings for timestep. 
If provided, the embeddings will be summed with the timestep_embedding passed through the `self.time_embedding` layer to obtain the final timestep embeddings. attention_mask (`torch.Tensor`, *optional*, defaults to `None`): An attention mask of shape `(batch, key_tokens)` is applied to `encoder_hidden_states`. If `1` the mask is kept, otherwise if `0` it is discarded. Mask will be converted into a bias, which adds large negative values to the attention scores corresponding to "discard" tokens. added_cond_kwargs (`dict`): Additional conditions for the Stable Diffusion XL UNet. cross_attention_kwargs (`dict[str]`, *optional*, defaults to `None`): A kwargs dictionary that if specified is passed along to the `AttnProcessor`. guess_mode (`bool`, defaults to `False`): In this mode, the ControlNet encoder tries its best to recognize the input content of the input even if you remove all prompts. A `guidance_scale` between 3.0 and 5.0 is recommended. return_dict (`bool`, defaults to `True`): Whether or not to return a [`~models.controlnet.ControlNetOutput`] instead of a plain tuple. Returns: [`~models.controlnet.ControlNetOutput`] **or** `tuple`: If `return_dict` is `True`, a [`~models.controlnet.ControlNetOutput`] is returned, otherwise a tuple is returned where the first element is the sample tensor. """ # check channel order channel_order = self.config.controlnet_conditioning_channel_order if channel_order == "rgb": # in rgb order by default ... elif channel_order == "bgr": controlnet_cond = torch.flip(controlnet_cond, dims=[1]) else: raise ValueError(f"unknown `controlnet_conditioning_channel_order`: {channel_order}") # prepare attention_mask if attention_mask is not None: attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0 attention_mask = attention_mask.unsqueeze(1) # 1. time timesteps = timestep if not torch.is_tensor(timesteps): # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can # This would be a good case for the `match` statement (Python 3.10+) is_mps = sample.device.type == "mps" if isinstance(timestep, float): dtype = torch.float32 if is_mps else torch.float64 else: dtype = torch.int32 if is_mps else torch.int64 timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device) elif len(timesteps.shape) == 0: timesteps = timesteps[None].to(sample.device) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML timesteps = timesteps.expand(sample.shape[0]) t_emb = self.time_proj(timesteps) # timesteps does not contain any weights and will always return f32 tensors # but time_embedding might actually be running in fp16. so we need to cast here. # there might be better ways to encapsulate this. 
t_emb = t_emb.to(dtype=sample.dtype) emb = self.time_embedding(t_emb, timestep_cond) aug_emb = None if self.class_embedding is not None: if class_labels is None: raise ValueError("class_labels should be provided when num_class_embeds > 0") if self.config.class_embed_type == "timestep": class_labels = self.time_proj(class_labels) class_emb = self.class_embedding(class_labels).to(dtype=self.dtype) emb = emb + class_emb if self.config.addition_embed_type is not None: if self.config.addition_embed_type == "text": aug_emb = self.add_embedding(encoder_hidden_states) elif self.config.addition_embed_type == "text_time": if "text_embeds" not in added_cond_kwargs: raise ValueError( f"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `text_embeds` to be passed in `added_cond_kwargs`" ) text_embeds = added_cond_kwargs.get("text_embeds") if "time_ids" not in added_cond_kwargs: raise ValueError( f"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `time_ids` to be passed in `added_cond_kwargs`" ) time_ids = added_cond_kwargs.get("time_ids") time_embeds = self.add_time_proj(time_ids.flatten()) time_embeds = time_embeds.reshape((text_embeds.shape[0], -1)) add_embeds = torch.concat([text_embeds, time_embeds], dim=-1) add_embeds = add_embeds.to(emb.dtype) aug_emb = self.add_embedding(add_embeds) emb = emb + aug_emb if aug_emb is not None else emb # 2. pre-process sample = self.conv_in(sample) controlnet_cond = self.controlnet_cond_embedding(controlnet_cond) controlnet_query_cond = self.controlnet_query_cond_embedding(controlnet_query_cond) sample = sample + controlnet_cond + controlnet_query_cond # 3. down down_block_res_samples = (sample,) for downsample_block in self.down_blocks: if hasattr(downsample_block, "has_cross_attention") and downsample_block.has_cross_attention: sample, res_samples = downsample_block( hidden_states=sample, temb=emb, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask, cross_attention_kwargs=cross_attention_kwargs, ) else: sample, res_samples = downsample_block(hidden_states=sample, temb=emb) down_block_res_samples += res_samples # 4. mid if self.mid_block is not None: if hasattr(self.mid_block, "has_cross_attention") and self.mid_block.has_cross_attention: sample = self.mid_block( sample, emb, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask, cross_attention_kwargs=cross_attention_kwargs, ) else: sample = self.mid_block(sample, emb) # 5. Control net blocks controlnet_down_block_res_samples = () for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks): down_block_res_sample = controlnet_block(down_block_res_sample) controlnet_down_block_res_samples = controlnet_down_block_res_samples + (down_block_res_sample,) down_block_res_samples = controlnet_down_block_res_samples mid_block_res_sample = self.controlnet_mid_block(sample) # 6. 
scaling if guess_mode and not self.config.global_pool_conditions: scales = torch.logspace(-1, 0, len(down_block_res_samples) + 1, device=sample.device) # 0.1 to 1.0 scales = scales * conditioning_scale down_block_res_samples = [sample * scale for sample, scale in zip(down_block_res_samples, scales)] mid_block_res_sample = mid_block_res_sample * scales[-1] # last one else: down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples] mid_block_res_sample = mid_block_res_sample * conditioning_scale if self.config.global_pool_conditions: down_block_res_samples = [ torch.mean(sample, dim=(2, 3), keepdim=True) for sample in down_block_res_samples ] mid_block_res_sample = torch.mean(mid_block_res_sample, dim=(2, 3), keepdim=True) if not return_dict: return (down_block_res_samples, mid_block_res_sample) return ControlNetOutput( down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample )
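# ---------------------------------------------------------------------------
# Usage sketch (illustrative only; shapes assume the default config above:
# 4 latent channels, cross_attention_dim=1280, and conditioning images at 8x
# the latent resolution, as expected by ControlNetConditioningEmbedding):
#
#   import torch
#
#   controlnet = PromptDiffusionControlNetModel()
#   sample = torch.randn(1, 4, 64, 64)                   # noisy latents
#   timestep = torch.tensor([10])
#   encoder_hidden_states = torch.randn(1, 77, 1280)     # text-encoder features
#   controlnet_cond = torch.randn(1, 3, 512, 512)        # standard ControlNet conditioning image
#   controlnet_query_cond = torch.randn(1, 3, 512, 512)  # extra query-image conditioning added by this class
#
#   out = controlnet(
#       sample, timestep, encoder_hidden_states, controlnet_cond, controlnet_query_cond
#   )
#   down_res, mid_res = out.down_block_res_samples, out.mid_block_res_sample
# ---------------------------------------------------------------------------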
diffusers/examples/research_projects/promptdiffusion/promptdiffusioncontrolnet.py/0
{ "file_path": "diffusers/examples/research_projects/promptdiffusion/promptdiffusioncontrolnet.py", "repo_id": "diffusers", "token_count": 8376 }
128
# Running Stable Diffusion 3 DreamBooth LoRA training under 16GB

This is an **EDUCATIONAL** project that provides utilities for DreamBooth LoRA training for [Stable Diffusion 3 (SD3)](https://huggingface.co/papers/2403.03206) under 16GB GPU VRAM. This means you can successfully try out this project using a [free-tier Colab Notebook](https://colab.research.google.com/github/huggingface/diffusers/blob/main/examples/research_projects/sd3_lora_colab/sd3_dreambooth_lora_16gb.ipynb) instance. 🤗

> [!NOTE]
> SD3 is gated, so you need to make sure you agree to [share your contact info](https://huggingface.co/stabilityai/stable-diffusion-3-medium-diffusers) to access the model before using it with Diffusers. Once you have access, you need to log in so your system knows you're authorized. Use the command below to log in:

```bash
huggingface-cli login
```

This will also allow us to push the trained model parameters to the Hugging Face Hub platform.

For setup, inference code, and details on how to run the code, please follow the Colab Notebook provided above.

## How

We make use of several techniques to make this possible:

* Compute the embeddings from the instance prompt and serialize them for later reuse. This is implemented in the [`compute_embeddings.py`](./compute_embeddings.py) script. We use an 8bit (as introduced in [`LLM.int8()`](https://arxiv.org/abs/2208.07339)) T5 to reduce memory requirements to ~10.5GB.
* In the `train_dreambooth_sd3_lora_miniature.py` script, we make use of:
  * 8bit Adam for optimization through the `bitsandbytes` library.
  * Gradient checkpointing and gradient accumulation.
  * FP16 precision.
  * Flash attention through `F.scaled_dot_product_attention()`.

Computing the text embeddings is arguably the most memory-intensive part of the pipeline, as SD3 employs three text encoders. If we run them in FP32, it will take about 20GB of VRAM. With FP16, we are down to 12GB.

## Gotchas

This project is educational. It exists to showcase the possibility of fine-tuning a big diffusion system on consumer GPUs. But additional components might have to be added to obtain state-of-the-art performance. Below are some commonly known gotchas that users should be aware of:

* Training of text encoders is purposefully disabled.
* Techniques such as prior preservation are unsupported.
* Custom instance captions for instance images are unsupported, but this should be relatively easy to integrate.

Hopefully, this project gives you a template to extend it further to suit your needs.
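For completeness, the snippet below sketches what inference could look like outside the Colab Notebook. The LoRA repo id is a hypothetical placeholder, and the prompt should be whatever instance prompt you trained with; `StableDiffusion3Pipeline` and `load_lora_weights()` are the standard diffusers entry points used here.

```python
import torch
from diffusers import StableDiffusion3Pipeline

# Load the gated SD3 base model (requires prior access + `huggingface-cli login`).
pipe = StableDiffusion3Pipeline.from_pretrained(
    "stabilityai/stable-diffusion-3-medium-diffusers", torch_dtype=torch.float16
).to("cuda")

# Attach the DreamBooth LoRA trained by this project (hypothetical repo id or local path).
pipe.load_lora_weights("your-username/sd3-dreambooth-lora")

image = pipe(
    "a photo of sks dog in a bucket",  # use the instance prompt you trained with
    num_inference_steps=28,
    guidance_scale=7.0,
).images[0]
image.save("sd3_lora_sample.png")
```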
diffusers/examples/research_projects/sd3_lora_colab/README.md/0
{ "file_path": "diffusers/examples/research_projects/sd3_lora_colab/README.md", "repo_id": "diffusers", "token_count": 706 }
129
# Stable Diffusion XL text-to-image fine-tuning The `train_text_to_image_sdxl.py` script shows how to fine-tune Stable Diffusion XL (SDXL) on your own dataset. 🚨 This script is experimental. The script fine-tunes the whole model and often times the model overfits and runs into issues like catastrophic forgetting. It's recommended to try different hyperparameters to get the best result on your dataset. 🚨 ## Running locally with PyTorch ### Installing the dependencies Before running the scripts, make sure to install the library's training dependencies: **Important** To make sure you can successfully run the latest versions of the example scripts, we highly recommend **installing from source** and keeping the install up to date as we update the example scripts frequently and install some example-specific requirements. To do this, execute the following steps in a new virtual environment: ```bash git clone https://github.com/huggingface/diffusers cd diffusers pip install -e . ``` Then cd in the `examples/text_to_image` folder and run ```bash pip install -r requirements_sdxl.txt ``` And initialize an [🤗Accelerate](https://github.com/huggingface/accelerate/) environment with: ```bash accelerate config ``` Or for a default accelerate configuration without answering questions about your environment ```bash accelerate config default ``` Or if your environment doesn't support an interactive shell (e.g., a notebook) ```python from accelerate.utils import write_basic_config write_basic_config() ``` When running `accelerate config`, if we specify torch compile mode to True there can be dramatic speedups. Note also that we use PEFT library as backend for LoRA training, make sure to have `peft>=0.6.0` installed in your environment. ### Training ```bash export MODEL_NAME="stabilityai/stable-diffusion-xl-base-1.0" export VAE_NAME="madebyollin/sdxl-vae-fp16-fix" export DATASET_NAME="lambdalabs/naruto-blip-captions" accelerate launch train_text_to_image_sdxl.py \ --pretrained_model_name_or_path=$MODEL_NAME \ --pretrained_vae_model_name_or_path=$VAE_NAME \ --dataset_name=$DATASET_NAME \ --enable_xformers_memory_efficient_attention \ --resolution=512 --center_crop --random_flip \ --proportion_empty_prompts=0.2 \ --train_batch_size=1 \ --gradient_accumulation_steps=4 --gradient_checkpointing \ --max_train_steps=10000 \ --use_8bit_adam \ --learning_rate=1e-06 --lr_scheduler="constant" --lr_warmup_steps=0 \ --mixed_precision="fp16" \ --report_to="wandb" \ --validation_prompt="a cute Sundar Pichai creature" --validation_epochs 5 \ --checkpointing_steps=5000 \ --output_dir="sdxl-naruto-model" \ --push_to_hub ``` **Notes**: * The `train_text_to_image_sdxl.py` script pre-computes text embeddings and the VAE encodings and keeps them in memory. While for smaller datasets like [`lambdalabs/naruto-blip-captions`](https://hf.co/datasets/lambdalabs/naruto-blip-captions), it might not be a problem, it can definitely lead to memory problems when the script is used on a larger dataset. For those purposes, you would want to serialize these pre-computed representations to disk separately and load them during the fine-tuning process. Refer to [this PR](https://github.com/huggingface/diffusers/pull/4505) for a more in-depth discussion. * The training script is compute-intensive and may not run on a consumer GPU like Tesla T4. * The training command shown above performs intermediate quality validation in between the training epochs and logs the results to Weights and Biases. 
### Inference

```python
from diffusers import DiffusionPipeline
import torch

model_path = "your-model-id-goes-here"  # <-- change this
pipe = DiffusionPipeline.from_pretrained(model_path, torch_dtype=torch.float16)
pipe.to("cuda")

prompt = "A naruto with green eyes and red legs."
image = pipe(prompt, num_inference_steps=30, guidance_scale=7.5).images[0]
image.save("naruto.png")
```

### Inference in PyTorch XLA

```python
from time import time

import torch
import torch_xla.core.xla_model as xm
from diffusers import DiffusionPipeline

model_id = "stabilityai/stable-diffusion-xl-base-1.0"
pipe = DiffusionPipeline.from_pretrained(model_id)

device = xm.xla_device()
pipe.to(device)

inference_steps = 30
prompt = "A naruto with green eyes and red legs."

start = time()
image = pipe(prompt, num_inference_steps=inference_steps).images[0]
print(f"Compilation time is {time()-start} sec")
image.save("naruto.png")

start = time()
image = pipe(prompt, num_inference_steps=inference_steps).images[0]
print(f"Inference time is {time()-start} sec after compilation")
```

Note: There is a warmup step in PyTorch XLA. This takes longer because of compilation and optimization. To see the real benefits of PyTorch XLA and the speedup, we need to call the pipe again on an input with the same length as the original prompt to reuse the optimized graph and get the performance boost.

## LoRA training example for Stable Diffusion XL (SDXL)

Low-Rank Adaptation of Large Language Models was first introduced by Microsoft in [LoRA: Low-Rank Adaptation of Large Language Models](https://arxiv.org/abs/2106.09685) by *Edward J. Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean Wang, Lu Wang, Weizhu Chen*.

In a nutshell, LoRA allows adapting pretrained models by adding pairs of rank-decomposition matrices to existing weights and **only** training those newly added weights. This has a couple of advantages:

- Previous pretrained weights are kept frozen so that the model is not prone to [catastrophic forgetting](https://www.pnas.org/doi/10.1073/pnas.1611835114).
- Rank-decomposition matrices have significantly fewer parameters than the original model, which means that trained LoRA weights are easily portable.
- LoRA attention layers allow you to control the extent to which the model is adapted toward new training images via a `scale` parameter.

[cloneofsimo](https://github.com/cloneofsimo) was the first to try out LoRA training for Stable Diffusion in the popular [lora](https://github.com/cloneofsimo/lora) GitHub repository. With LoRA, it's possible to fine-tune Stable Diffusion on a custom image-caption pair dataset on consumer GPUs like Tesla T4 or Tesla V100.
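To make the rank-decomposition idea above concrete, here is a minimal, self-contained sketch (not taken from the training script or the PEFT backend): a frozen linear layer is augmented with two small trainable matrices whose product forms the low-rank update, weighted by a `scale` factor.

```python
import torch
import torch.nn as nn


class LoRALinear(nn.Module):
    """Toy LoRA wrapper: y = W x + scale * B(A(x)), with only A and B trainable."""

    def __init__(self, base: nn.Linear, rank: int = 4, scale: float = 1.0):
        super().__init__()
        self.base = base
        for p in self.base.parameters():  # freeze the pretrained weight
            p.requires_grad_(False)
        self.lora_down = nn.Linear(base.in_features, rank, bias=False)  # A
        self.lora_up = nn.Linear(rank, base.out_features, bias=False)   # B
        nn.init.zeros_(self.lora_up.weight)  # start as a no-op update
        self.scale = scale

    def forward(self, x):
        return self.base(x) + self.scale * self.lora_up(self.lora_down(x))


layer = LoRALinear(nn.Linear(320, 320), rank=4, scale=1.0)
trainable = sum(p.numel() for p in layer.parameters() if p.requires_grad)
total = sum(p.numel() for p in layer.parameters())
print(f"trainable params: {trainable} / {total}")  # only the rank-4 matrices train
```

Because the pretrained weight stays frozen, only the tiny `A`/`B` matrices need to be stored and shared, which is what makes the trained LoRA weights so portable.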
### Training

First, you need to set up your development environment as explained in the [installation section](#installing-the-dependencies). Make sure to set the `MODEL_NAME` and `DATASET_NAME` environment variables and, optionally, the `VAE_NAME` variable. Here, we will use [Stable Diffusion XL 1.0-base](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0) and the [Narutos dataset](https://huggingface.co/datasets/lambdalabs/naruto-blip-captions).

**___Note: It is quite useful to monitor the training progress by regularly generating sample images during training. [Weights and Biases](https://docs.wandb.ai/quickstart) is a nice solution to easily see the generated images during training. All you need to do is to run `pip install wandb` before training to automatically log images.___**

```bash
export MODEL_NAME="stabilityai/stable-diffusion-xl-base-1.0"
export VAE_NAME="madebyollin/sdxl-vae-fp16-fix"
export DATASET_NAME="lambdalabs/naruto-blip-captions"
```

For this example we want to directly store the trained LoRA embeddings on the Hub, so we need to be logged in and add the `--push_to_hub` flag:

```bash
huggingface-cli login
```

Now we can start training!

```bash
accelerate launch train_text_to_image_lora_sdxl.py \
  --pretrained_model_name_or_path=$MODEL_NAME \
  --pretrained_vae_model_name_or_path=$VAE_NAME \
  --dataset_name=$DATASET_NAME --caption_column="text" \
  --resolution=1024 --random_flip \
  --train_batch_size=1 \
  --num_train_epochs=2 --checkpointing_steps=500 \
  --learning_rate=1e-04 --lr_scheduler="constant" --lr_warmup_steps=0 \
  --mixed_precision="fp16" \
  --seed=42 \
  --output_dir="sd-naruto-model-lora-sdxl" \
  --validation_prompt="cute dragon creature" --report_to="wandb" \
  --push_to_hub
```

The above command will also run inference as fine-tuning progresses and log the results to Weights and Biases.

**Notes**:

* SDXL's VAE is known to suffer from numerical instability issues. This is why we also expose a CLI argument namely `--pretrained_vae_model_name_or_path` that lets you specify the location of a better VAE (such as [this one](https://huggingface.co/madebyollin/sdxl-vae-fp16-fix)).

### Using DeepSpeed

With DeepSpeed you can reduce GPU memory consumption, enabling training on GPUs with less memory. DeepSpeed can offload model parameters to CPU memory, or distribute parameters, gradients, and optimizer states across multiple GPUs. This allows larger models to be trained under the same hardware configuration.

First, use the `accelerate config` command to enable DeepSpeed, or write an accelerate config file manually to set it up. Here is an example of a config file for using DeepSpeed. For more detailed explanations of the configuration, you can refer to this [link](https://huggingface.co/docs/accelerate/usage_guides/deepspeed).

```yaml
compute_environment: LOCAL_MACHINE
debug: true
deepspeed_config:
  gradient_accumulation_steps: 1
  gradient_clipping: 1.0
  offload_optimizer_device: none
  offload_param_device: none
  zero3_init_flag: false
  zero_stage: 2
distributed_type: DEEPSPEED
downcast_bf16: 'no'
machine_rank: 0
main_training_function: main
mixed_precision: fp16
num_machines: 1
num_processes: 1
rdzv_backend: static
same_network: true
tpu_env: []
tpu_use_cluster: false
tpu_use_sudo: false
use_cpu: false
```

You need to save this configuration as an `accelerate_config.yaml` file, then pass its path to the `ACCELERATE_CONFIG_FILE` variable, as shown below. This way you can use DeepSpeed to train your SDXL model with LoRA; you can train other SD models with DeepSpeed in the same way.

```shell
export MODEL_NAME="stabilityai/stable-diffusion-xl-base-1.0"
export VAE_NAME="madebyollin/sdxl-vae-fp16-fix"
export DATASET_NAME="lambdalabs/naruto-blip-captions"
export ACCELERATE_CONFIG_FILE="your accelerate_config.yaml"

accelerate launch --config_file $ACCELERATE_CONFIG_FILE train_text_to_image_lora_sdxl.py \
  --pretrained_model_name_or_path=$MODEL_NAME \
  --pretrained_vae_model_name_or_path=$VAE_NAME \
  --dataset_name=$DATASET_NAME --caption_column="text" \
  --resolution=1024 \
  --train_batch_size=1 \
  --num_train_epochs=2 \
  --checkpointing_steps=2 \
  --learning_rate=1e-04 \
  --lr_scheduler="constant" \
  --lr_warmup_steps=0 \
  --mixed_precision="fp16" \
  --max_train_steps=20 \
  --validation_epochs=20 \
  --seed=1234 \
  --output_dir="sd-naruto-model-lora-sdxl" \
  --validation_prompt="cute dragon creature"
```
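To get an intuition for why ZeRO stage 2 helps, here is a rough back-of-envelope estimate, not produced by the script: AdamW keeps two extra fp32 statistics per trained parameter, and stage 2 shards those optimizer states and the gradients across GPUs. The function below ignores activations and the fp32 master weights kept by mixed-precision optimizers, and the 2.6B figure is the approximate size of SDXL's UNet; with LoRA only the small adapter matrices carry gradients and optimizer states, so the absolute numbers are far smaller, but the sharding logic is the same.

```python
def estimate_gib(n_params: float, n_gpus: int = 1, zero_stage: int = 0) -> dict:
    """Very rough per-GPU memory estimate (fp16 weights/grads + fp32 AdamW states), in GiB."""
    gib = 1024**3
    weights = 2 * n_params   # fp16 parameters
    grads = 2 * n_params     # fp16 gradients
    optim = 8 * n_params     # two fp32 AdamW statistics per parameter
    if zero_stage >= 1:
        optim /= n_gpus      # stage 1: shard optimizer states
    if zero_stage >= 2:
        grads /= n_gpus      # stage 2: also shard gradients
    if zero_stage >= 3:
        weights /= n_gpus    # stage 3: also shard parameters
    return {"weights": weights / gib, "grads": grads / gib, "optimizer": optim / gib}


# Full fine-tuning of a ~2.6B-parameter UNet, single GPU vs. 2 GPUs with ZeRO-2.
print(estimate_gib(2.6e9, n_gpus=1, zero_stage=0))
print(estimate_gib(2.6e9, n_gpus=2, zero_stage=2))
```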
### Finetuning the text encoder and UNet

The script also allows you to finetune the `text_encoder` along with the `unet`.

🚨 Training the text encoder requires additional memory.

Pass the `--train_text_encoder` argument to the training script to enable finetuning the `text_encoder` and `unet`:

```bash
accelerate launch train_text_to_image_lora_sdxl.py \
  --pretrained_model_name_or_path=$MODEL_NAME \
  --dataset_name=$DATASET_NAME --caption_column="text" \
  --resolution=1024 --random_flip \
  --train_batch_size=1 \
  --num_train_epochs=2 --checkpointing_steps=500 \
  --learning_rate=1e-04 --lr_scheduler="constant" --lr_warmup_steps=0 \
  --seed=42 \
  --output_dir="sd-naruto-model-lora-sdxl-txt" \
  --train_text_encoder \
  --validation_prompt="cute dragon creature" --report_to="wandb" \
  --push_to_hub
```

### Inference

Once you have trained a model using the above command, inference can be done simply using the `DiffusionPipeline` after loading the trained LoRA weights. You need to pass the `output_dir` for loading the LoRA weights, which, in this case, is `sd-naruto-model-lora-sdxl` (or the corresponding Hub repository if you trained with `--push_to_hub`, as in the example below).

```python
from diffusers import DiffusionPipeline
import torch

model_path = "takuoko/sd-naruto-model-lora-sdxl"
pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16)
pipe.to("cuda")
pipe.load_lora_weights(model_path)

prompt = "A naruto with green eyes and red legs."
image = pipe(prompt, num_inference_steps=30, guidance_scale=7.5).images[0]
image.save("naruto.png")
```
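Because only the low-rank update was trained, its influence can also be dialed up or down at inference time via the `scale` parameter mentioned earlier. Depending on your diffusers version, a common pattern is to pass the scale through `cross_attention_kwargs`; treat the exact value below as an illustration rather than a recommendation.

```python
from diffusers import DiffusionPipeline
import torch

pipe = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
).to("cuda")
pipe.load_lora_weights("takuoko/sd-naruto-model-lora-sdxl")

prompt = "A naruto with green eyes and red legs."
# scale=0.0 ignores the LoRA weights entirely, scale=1.0 applies them fully.
image = pipe(
    prompt,
    num_inference_steps=30,
    guidance_scale=7.5,
    cross_attention_kwargs={"scale": 0.7},
).images[0]
image.save("naruto_lora_scale_0_7.png")
```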
import argparse import os import torch from diffusers import ( CMStochasticIterativeScheduler, ConsistencyModelPipeline, UNet2DModel, ) TEST_UNET_CONFIG = { "sample_size": 32, "in_channels": 3, "out_channels": 3, "layers_per_block": 2, "num_class_embeds": 1000, "block_out_channels": [32, 64], "attention_head_dim": 8, "down_block_types": [ "ResnetDownsampleBlock2D", "AttnDownBlock2D", ], "up_block_types": [ "AttnUpBlock2D", "ResnetUpsampleBlock2D", ], "resnet_time_scale_shift": "scale_shift", "attn_norm_num_groups": 32, "upsample_type": "resnet", "downsample_type": "resnet", } IMAGENET_64_UNET_CONFIG = { "sample_size": 64, "in_channels": 3, "out_channels": 3, "layers_per_block": 3, "num_class_embeds": 1000, "block_out_channels": [192, 192 * 2, 192 * 3, 192 * 4], "attention_head_dim": 64, "down_block_types": [ "ResnetDownsampleBlock2D", "AttnDownBlock2D", "AttnDownBlock2D", "AttnDownBlock2D", ], "up_block_types": [ "AttnUpBlock2D", "AttnUpBlock2D", "AttnUpBlock2D", "ResnetUpsampleBlock2D", ], "resnet_time_scale_shift": "scale_shift", "attn_norm_num_groups": 32, "upsample_type": "resnet", "downsample_type": "resnet", } LSUN_256_UNET_CONFIG = { "sample_size": 256, "in_channels": 3, "out_channels": 3, "layers_per_block": 2, "num_class_embeds": None, "block_out_channels": [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4], "attention_head_dim": 64, "down_block_types": [ "ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D", "AttnDownBlock2D", "AttnDownBlock2D", "AttnDownBlock2D", ], "up_block_types": [ "AttnUpBlock2D", "AttnUpBlock2D", "AttnUpBlock2D", "ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D", ], "resnet_time_scale_shift": "default", "upsample_type": "resnet", "downsample_type": "resnet", } CD_SCHEDULER_CONFIG = { "num_train_timesteps": 40, "sigma_min": 0.002, "sigma_max": 80.0, } CT_IMAGENET_64_SCHEDULER_CONFIG = { "num_train_timesteps": 201, "sigma_min": 0.002, "sigma_max": 80.0, } CT_LSUN_256_SCHEDULER_CONFIG = { "num_train_timesteps": 151, "sigma_min": 0.002, "sigma_max": 80.0, } def str2bool(v): """ https://stackoverflow.com/questions/15008758/parsing-boolean-values-with-argparse """ if isinstance(v, bool): return v if v.lower() in ("yes", "true", "t", "y", "1"): return True elif v.lower() in ("no", "false", "f", "n", "0"): return False else: raise argparse.ArgumentTypeError("boolean value expected") def convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=False): new_checkpoint[f"{new_prefix}.norm1.weight"] = checkpoint[f"{old_prefix}.in_layers.0.weight"] new_checkpoint[f"{new_prefix}.norm1.bias"] = checkpoint[f"{old_prefix}.in_layers.0.bias"] new_checkpoint[f"{new_prefix}.conv1.weight"] = checkpoint[f"{old_prefix}.in_layers.2.weight"] new_checkpoint[f"{new_prefix}.conv1.bias"] = checkpoint[f"{old_prefix}.in_layers.2.bias"] new_checkpoint[f"{new_prefix}.time_emb_proj.weight"] = checkpoint[f"{old_prefix}.emb_layers.1.weight"] new_checkpoint[f"{new_prefix}.time_emb_proj.bias"] = checkpoint[f"{old_prefix}.emb_layers.1.bias"] new_checkpoint[f"{new_prefix}.norm2.weight"] = checkpoint[f"{old_prefix}.out_layers.0.weight"] new_checkpoint[f"{new_prefix}.norm2.bias"] = checkpoint[f"{old_prefix}.out_layers.0.bias"] new_checkpoint[f"{new_prefix}.conv2.weight"] = checkpoint[f"{old_prefix}.out_layers.3.weight"] new_checkpoint[f"{new_prefix}.conv2.bias"] = checkpoint[f"{old_prefix}.out_layers.3.bias"] if has_skip: new_checkpoint[f"{new_prefix}.conv_shortcut.weight"] = checkpoint[f"{old_prefix}.skip_connection.weight"] 
new_checkpoint[f"{new_prefix}.conv_shortcut.bias"] = checkpoint[f"{old_prefix}.skip_connection.bias"] return new_checkpoint def convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_dim=None): weight_q, weight_k, weight_v = checkpoint[f"{old_prefix}.qkv.weight"].chunk(3, dim=0) bias_q, bias_k, bias_v = checkpoint[f"{old_prefix}.qkv.bias"].chunk(3, dim=0) new_checkpoint[f"{new_prefix}.group_norm.weight"] = checkpoint[f"{old_prefix}.norm.weight"] new_checkpoint[f"{new_prefix}.group_norm.bias"] = checkpoint[f"{old_prefix}.norm.bias"] new_checkpoint[f"{new_prefix}.to_q.weight"] = weight_q.squeeze(-1).squeeze(-1) new_checkpoint[f"{new_prefix}.to_q.bias"] = bias_q.squeeze(-1).squeeze(-1) new_checkpoint[f"{new_prefix}.to_k.weight"] = weight_k.squeeze(-1).squeeze(-1) new_checkpoint[f"{new_prefix}.to_k.bias"] = bias_k.squeeze(-1).squeeze(-1) new_checkpoint[f"{new_prefix}.to_v.weight"] = weight_v.squeeze(-1).squeeze(-1) new_checkpoint[f"{new_prefix}.to_v.bias"] = bias_v.squeeze(-1).squeeze(-1) new_checkpoint[f"{new_prefix}.to_out.0.weight"] = ( checkpoint[f"{old_prefix}.proj_out.weight"].squeeze(-1).squeeze(-1) ) new_checkpoint[f"{new_prefix}.to_out.0.bias"] = checkpoint[f"{old_prefix}.proj_out.bias"].squeeze(-1).squeeze(-1) return new_checkpoint def con_pt_to_diffuser(checkpoint_path: str, unet_config): checkpoint = torch.load(checkpoint_path, map_location="cpu") new_checkpoint = {} new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"] new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"] new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"] new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"] if unet_config["num_class_embeds"] is not None: new_checkpoint["class_embedding.weight"] = checkpoint["label_emb.weight"] new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"] new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"] down_block_types = unet_config["down_block_types"] layers_per_block = unet_config["layers_per_block"] attention_head_dim = unet_config["attention_head_dim"] channels_list = unet_config["block_out_channels"] current_layer = 1 prev_channels = channels_list[0] for i, layer_type in enumerate(down_block_types): current_channels = channels_list[i] downsample_block_has_skip = current_channels != prev_channels if layer_type == "ResnetDownsampleBlock2D": for j in range(layers_per_block): new_prefix = f"down_blocks.{i}.resnets.{j}" old_prefix = f"input_blocks.{current_layer}.0" has_skip = True if j == 0 and downsample_block_has_skip else False new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip) current_layer += 1 elif layer_type == "AttnDownBlock2D": for j in range(layers_per_block): new_prefix = f"down_blocks.{i}.resnets.{j}" old_prefix = f"input_blocks.{current_layer}.0" has_skip = True if j == 0 and downsample_block_has_skip else False new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip) new_prefix = f"down_blocks.{i}.attentions.{j}" old_prefix = f"input_blocks.{current_layer}.1" new_checkpoint = convert_attention( checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim ) current_layer += 1 if i != len(down_block_types) - 1: new_prefix = f"down_blocks.{i}.downsamplers.0" old_prefix = f"input_blocks.{current_layer}.0" new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix) 
current_layer += 1 prev_channels = current_channels # hardcoded the mid-block for now new_prefix = "mid_block.resnets.0" old_prefix = "middle_block.0" new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix) new_prefix = "mid_block.attentions.0" old_prefix = "middle_block.1" new_checkpoint = convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim) new_prefix = "mid_block.resnets.1" old_prefix = "middle_block.2" new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix) current_layer = 0 up_block_types = unet_config["up_block_types"] for i, layer_type in enumerate(up_block_types): if layer_type == "ResnetUpsampleBlock2D": for j in range(layers_per_block + 1): new_prefix = f"up_blocks.{i}.resnets.{j}" old_prefix = f"output_blocks.{current_layer}.0" new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True) current_layer += 1 if i != len(up_block_types) - 1: new_prefix = f"up_blocks.{i}.upsamplers.0" old_prefix = f"output_blocks.{current_layer-1}.1" new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix) elif layer_type == "AttnUpBlock2D": for j in range(layers_per_block + 1): new_prefix = f"up_blocks.{i}.resnets.{j}" old_prefix = f"output_blocks.{current_layer}.0" new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True) new_prefix = f"up_blocks.{i}.attentions.{j}" old_prefix = f"output_blocks.{current_layer}.1" new_checkpoint = convert_attention( checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim ) current_layer += 1 if i != len(up_block_types) - 1: new_prefix = f"up_blocks.{i}.upsamplers.0" old_prefix = f"output_blocks.{current_layer-1}.2" new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix) new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"] new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"] new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"] new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"] return new_checkpoint if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--unet_path", default=None, type=str, required=True, help="Path to the unet.pt to convert.") parser.add_argument( "--dump_path", default=None, type=str, required=True, help="Path to output the converted UNet model." 
) parser.add_argument("--class_cond", default=True, type=str, help="Whether the model is class-conditional.") args = parser.parse_args() args.class_cond = str2bool(args.class_cond) ckpt_name = os.path.basename(args.unet_path) print(f"Checkpoint: {ckpt_name}") # Get U-Net config if "imagenet64" in ckpt_name: unet_config = IMAGENET_64_UNET_CONFIG elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)): unet_config = LSUN_256_UNET_CONFIG elif "test" in ckpt_name: unet_config = TEST_UNET_CONFIG else: raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.") if not args.class_cond: unet_config["num_class_embeds"] = None converted_unet_ckpt = con_pt_to_diffuser(args.unet_path, unet_config) image_unet = UNet2DModel(**unet_config) image_unet.load_state_dict(converted_unet_ckpt) # Get scheduler config if "cd" in ckpt_name or "test" in ckpt_name: scheduler_config = CD_SCHEDULER_CONFIG elif "ct" in ckpt_name and "imagenet64" in ckpt_name: scheduler_config = CT_IMAGENET_64_SCHEDULER_CONFIG elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)): scheduler_config = CT_LSUN_256_SCHEDULER_CONFIG else: raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.") cm_scheduler = CMStochasticIterativeScheduler(**scheduler_config) consistency_model = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler) consistency_model.save_pretrained(args.dump_path)
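Once the conversion script above has been run, the resulting folder can be loaded back with the regular diffusers API. The snippet below is a usage sketch, not part of the original script: the `./converted-consistency-model` path stands in for whatever `--dump_path` was passed, and the single-step call relies on the consistency-model scheduler configured above.

```python
import torch
from diffusers import ConsistencyModelPipeline

# Load the pipeline written out by `consistency_model.save_pretrained(args.dump_path)`.
pipe = ConsistencyModelPipeline.from_pretrained(
    "./converted-consistency-model", torch_dtype=torch.float16
).to("cuda")

# Consistency models support single-step generation; class_labels only applies
# to class-conditional checkpoints (e.g. the ImageNet-64 ones).
image = pipe(batch_size=1, num_inference_steps=1, class_labels=None).images[0]
image.save("consistency_sample.png")
```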
import argparse import os import tempfile import torch from accelerate import load_checkpoint_and_dispatch from diffusers import UNet2DConditionModel from diffusers.models.transformers.prior_transformer import PriorTransformer from diffusers.models.vq_model import VQModel """ Example - From the diffusers root directory: Download weights: ```sh $ wget https://huggingface.co/ai-forever/Kandinsky_2.1/blob/main/prior_fp16.ckpt ``` Convert the model: ```sh python scripts/convert_kandinsky_to_diffusers.py \ --prior_checkpoint_path /home/yiyi_huggingface_co/Kandinsky-2/checkpoints_Kandinsky_2.1/prior_fp16.ckpt \ --clip_stat_path /home/yiyi_huggingface_co/Kandinsky-2/checkpoints_Kandinsky_2.1/ViT-L-14_stats.th \ --text2img_checkpoint_path /home/yiyi_huggingface_co/Kandinsky-2/checkpoints_Kandinsky_2.1/decoder_fp16.ckpt \ --inpaint_text2img_checkpoint_path /home/yiyi_huggingface_co/Kandinsky-2/checkpoints_Kandinsky_2.1/inpainting_fp16.ckpt \ --movq_checkpoint_path /home/yiyi_huggingface_co/Kandinsky-2/checkpoints_Kandinsky_2.1/movq_final.ckpt \ --dump_path /home/yiyi_huggingface_co/dump \ --debug decoder ``` """ # prior PRIOR_ORIGINAL_PREFIX = "model" # Uses default arguments PRIOR_CONFIG = {} def prior_model_from_original_config(): model = PriorTransformer(**PRIOR_CONFIG) return model def prior_original_checkpoint_to_diffusers_checkpoint(model, checkpoint, clip_stats_checkpoint): diffusers_checkpoint = {} # <original>.time_embed.0 -> <diffusers>.time_embedding.linear_1 diffusers_checkpoint.update( { "time_embedding.linear_1.weight": checkpoint[f"{PRIOR_ORIGINAL_PREFIX}.time_embed.0.weight"], "time_embedding.linear_1.bias": checkpoint[f"{PRIOR_ORIGINAL_PREFIX}.time_embed.0.bias"], } ) # <original>.clip_img_proj -> <diffusers>.proj_in diffusers_checkpoint.update( { "proj_in.weight": checkpoint[f"{PRIOR_ORIGINAL_PREFIX}.clip_img_proj.weight"], "proj_in.bias": checkpoint[f"{PRIOR_ORIGINAL_PREFIX}.clip_img_proj.bias"], } ) # <original>.text_emb_proj -> <diffusers>.embedding_proj diffusers_checkpoint.update( { "embedding_proj.weight": checkpoint[f"{PRIOR_ORIGINAL_PREFIX}.text_emb_proj.weight"], "embedding_proj.bias": checkpoint[f"{PRIOR_ORIGINAL_PREFIX}.text_emb_proj.bias"], } ) # <original>.text_enc_proj -> <diffusers>.encoder_hidden_states_proj diffusers_checkpoint.update( { "encoder_hidden_states_proj.weight": checkpoint[f"{PRIOR_ORIGINAL_PREFIX}.text_enc_proj.weight"], "encoder_hidden_states_proj.bias": checkpoint[f"{PRIOR_ORIGINAL_PREFIX}.text_enc_proj.bias"], } ) # <original>.positional_embedding -> <diffusers>.positional_embedding diffusers_checkpoint.update({"positional_embedding": checkpoint[f"{PRIOR_ORIGINAL_PREFIX}.positional_embedding"]}) # <original>.prd_emb -> <diffusers>.prd_embedding diffusers_checkpoint.update({"prd_embedding": checkpoint[f"{PRIOR_ORIGINAL_PREFIX}.prd_emb"]}) # <original>.time_embed.2 -> <diffusers>.time_embedding.linear_2 diffusers_checkpoint.update( { "time_embedding.linear_2.weight": checkpoint[f"{PRIOR_ORIGINAL_PREFIX}.time_embed.2.weight"], "time_embedding.linear_2.bias": checkpoint[f"{PRIOR_ORIGINAL_PREFIX}.time_embed.2.bias"], } ) # <original>.resblocks.<x> -> <diffusers>.transformer_blocks.<x> for idx in range(len(model.transformer_blocks)): diffusers_transformer_prefix = f"transformer_blocks.{idx}" original_transformer_prefix = f"{PRIOR_ORIGINAL_PREFIX}.transformer.resblocks.{idx}" # <original>.attn -> <diffusers>.attn1 diffusers_attention_prefix = f"{diffusers_transformer_prefix}.attn1" original_attention_prefix = f"{original_transformer_prefix}.attn" 
diffusers_checkpoint.update( prior_attention_to_diffusers( checkpoint, diffusers_attention_prefix=diffusers_attention_prefix, original_attention_prefix=original_attention_prefix, attention_head_dim=model.attention_head_dim, ) ) # <original>.mlp -> <diffusers>.ff diffusers_ff_prefix = f"{diffusers_transformer_prefix}.ff" original_ff_prefix = f"{original_transformer_prefix}.mlp" diffusers_checkpoint.update( prior_ff_to_diffusers( checkpoint, diffusers_ff_prefix=diffusers_ff_prefix, original_ff_prefix=original_ff_prefix ) ) # <original>.ln_1 -> <diffusers>.norm1 diffusers_checkpoint.update( { f"{diffusers_transformer_prefix}.norm1.weight": checkpoint[ f"{original_transformer_prefix}.ln_1.weight" ], f"{diffusers_transformer_prefix}.norm1.bias": checkpoint[f"{original_transformer_prefix}.ln_1.bias"], } ) # <original>.ln_2 -> <diffusers>.norm3 diffusers_checkpoint.update( { f"{diffusers_transformer_prefix}.norm3.weight": checkpoint[ f"{original_transformer_prefix}.ln_2.weight" ], f"{diffusers_transformer_prefix}.norm3.bias": checkpoint[f"{original_transformer_prefix}.ln_2.bias"], } ) # <original>.final_ln -> <diffusers>.norm_out diffusers_checkpoint.update( { "norm_out.weight": checkpoint[f"{PRIOR_ORIGINAL_PREFIX}.final_ln.weight"], "norm_out.bias": checkpoint[f"{PRIOR_ORIGINAL_PREFIX}.final_ln.bias"], } ) # <original>.out_proj -> <diffusers>.proj_to_clip_embeddings diffusers_checkpoint.update( { "proj_to_clip_embeddings.weight": checkpoint[f"{PRIOR_ORIGINAL_PREFIX}.out_proj.weight"], "proj_to_clip_embeddings.bias": checkpoint[f"{PRIOR_ORIGINAL_PREFIX}.out_proj.bias"], } ) # clip stats clip_mean, clip_std = clip_stats_checkpoint clip_mean = clip_mean[None, :] clip_std = clip_std[None, :] diffusers_checkpoint.update({"clip_mean": clip_mean, "clip_std": clip_std}) return diffusers_checkpoint def prior_attention_to_diffusers( checkpoint, *, diffusers_attention_prefix, original_attention_prefix, attention_head_dim ): diffusers_checkpoint = {} # <original>.c_qkv -> <diffusers>.{to_q, to_k, to_v} [q_weight, k_weight, v_weight], [q_bias, k_bias, v_bias] = split_attentions( weight=checkpoint[f"{original_attention_prefix}.c_qkv.weight"], bias=checkpoint[f"{original_attention_prefix}.c_qkv.bias"], split=3, chunk_size=attention_head_dim, ) diffusers_checkpoint.update( { f"{diffusers_attention_prefix}.to_q.weight": q_weight, f"{diffusers_attention_prefix}.to_q.bias": q_bias, f"{diffusers_attention_prefix}.to_k.weight": k_weight, f"{diffusers_attention_prefix}.to_k.bias": k_bias, f"{diffusers_attention_prefix}.to_v.weight": v_weight, f"{diffusers_attention_prefix}.to_v.bias": v_bias, } ) # <original>.c_proj -> <diffusers>.to_out.0 diffusers_checkpoint.update( { f"{diffusers_attention_prefix}.to_out.0.weight": checkpoint[f"{original_attention_prefix}.c_proj.weight"], f"{diffusers_attention_prefix}.to_out.0.bias": checkpoint[f"{original_attention_prefix}.c_proj.bias"], } ) return diffusers_checkpoint def prior_ff_to_diffusers(checkpoint, *, diffusers_ff_prefix, original_ff_prefix): diffusers_checkpoint = { # <original>.c_fc -> <diffusers>.net.0.proj f"{diffusers_ff_prefix}.net.{0}.proj.weight": checkpoint[f"{original_ff_prefix}.c_fc.weight"], f"{diffusers_ff_prefix}.net.{0}.proj.bias": checkpoint[f"{original_ff_prefix}.c_fc.bias"], # <original>.c_proj -> <diffusers>.net.2 f"{diffusers_ff_prefix}.net.{2}.weight": checkpoint[f"{original_ff_prefix}.c_proj.weight"], f"{diffusers_ff_prefix}.net.{2}.bias": checkpoint[f"{original_ff_prefix}.c_proj.bias"], } return diffusers_checkpoint # done prior # unet # We are 
hardcoding the model configuration for now. If we need to generalize to more model configurations, we can # update then. UNET_CONFIG = { "act_fn": "silu", "addition_embed_type": "text_image", "addition_embed_type_num_heads": 64, "attention_head_dim": 64, "block_out_channels": [384, 768, 1152, 1536], "center_input_sample": False, "class_embed_type": None, "class_embeddings_concat": False, "conv_in_kernel": 3, "conv_out_kernel": 3, "cross_attention_dim": 768, "cross_attention_norm": None, "down_block_types": [ "ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D", "SimpleCrossAttnDownBlock2D", "SimpleCrossAttnDownBlock2D", ], "downsample_padding": 1, "dual_cross_attention": False, "encoder_hid_dim": 1024, "encoder_hid_dim_type": "text_image_proj", "flip_sin_to_cos": True, "freq_shift": 0, "in_channels": 4, "layers_per_block": 3, "mid_block_only_cross_attention": None, "mid_block_scale_factor": 1, "mid_block_type": "UNetMidBlock2DSimpleCrossAttn", "norm_eps": 1e-05, "norm_num_groups": 32, "num_class_embeds": None, "only_cross_attention": False, "out_channels": 8, "projection_class_embeddings_input_dim": None, "resnet_out_scale_factor": 1.0, "resnet_skip_time_act": False, "resnet_time_scale_shift": "scale_shift", "sample_size": 64, "time_cond_proj_dim": None, "time_embedding_act_fn": None, "time_embedding_dim": None, "time_embedding_type": "positional", "timestep_post_act": None, "up_block_types": [ "SimpleCrossAttnUpBlock2D", "SimpleCrossAttnUpBlock2D", "SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D", ], "upcast_attention": False, "use_linear_projection": False, } def unet_model_from_original_config(): model = UNet2DConditionModel(**UNET_CONFIG) return model def unet_original_checkpoint_to_diffusers_checkpoint(model, checkpoint): diffusers_checkpoint = {} num_head_channels = UNET_CONFIG["attention_head_dim"] diffusers_checkpoint.update(unet_time_embeddings(checkpoint)) diffusers_checkpoint.update(unet_conv_in(checkpoint)) diffusers_checkpoint.update(unet_add_embedding(checkpoint)) diffusers_checkpoint.update(unet_encoder_hid_proj(checkpoint)) # <original>.input_blocks -> <diffusers>.down_blocks original_down_block_idx = 1 for diffusers_down_block_idx in range(len(model.down_blocks)): checkpoint_update, num_original_down_blocks = unet_downblock_to_diffusers_checkpoint( model, checkpoint, diffusers_down_block_idx=diffusers_down_block_idx, original_down_block_idx=original_down_block_idx, num_head_channels=num_head_channels, ) original_down_block_idx += num_original_down_blocks diffusers_checkpoint.update(checkpoint_update) # done <original>.input_blocks -> <diffusers>.down_blocks diffusers_checkpoint.update( unet_midblock_to_diffusers_checkpoint( model, checkpoint, num_head_channels=num_head_channels, ) ) # <original>.output_blocks -> <diffusers>.up_blocks original_up_block_idx = 0 for diffusers_up_block_idx in range(len(model.up_blocks)): checkpoint_update, num_original_up_blocks = unet_upblock_to_diffusers_checkpoint( model, checkpoint, diffusers_up_block_idx=diffusers_up_block_idx, original_up_block_idx=original_up_block_idx, num_head_channels=num_head_channels, ) original_up_block_idx += num_original_up_blocks diffusers_checkpoint.update(checkpoint_update) # done <original>.output_blocks -> <diffusers>.up_blocks diffusers_checkpoint.update(unet_conv_norm_out(checkpoint)) diffusers_checkpoint.update(unet_conv_out(checkpoint)) return diffusers_checkpoint # done unet # inpaint unet # We are hardcoding the model configuration for now. 
If we need to generalize to more model configurations, we can # update then. INPAINT_UNET_CONFIG = { "act_fn": "silu", "addition_embed_type": "text_image", "addition_embed_type_num_heads": 64, "attention_head_dim": 64, "block_out_channels": [384, 768, 1152, 1536], "center_input_sample": False, "class_embed_type": None, "class_embeddings_concat": None, "conv_in_kernel": 3, "conv_out_kernel": 3, "cross_attention_dim": 768, "cross_attention_norm": None, "down_block_types": [ "ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D", "SimpleCrossAttnDownBlock2D", "SimpleCrossAttnDownBlock2D", ], "downsample_padding": 1, "dual_cross_attention": False, "encoder_hid_dim": 1024, "encoder_hid_dim_type": "text_image_proj", "flip_sin_to_cos": True, "freq_shift": 0, "in_channels": 9, "layers_per_block": 3, "mid_block_only_cross_attention": None, "mid_block_scale_factor": 1, "mid_block_type": "UNetMidBlock2DSimpleCrossAttn", "norm_eps": 1e-05, "norm_num_groups": 32, "num_class_embeds": None, "only_cross_attention": False, "out_channels": 8, "projection_class_embeddings_input_dim": None, "resnet_out_scale_factor": 1.0, "resnet_skip_time_act": False, "resnet_time_scale_shift": "scale_shift", "sample_size": 64, "time_cond_proj_dim": None, "time_embedding_act_fn": None, "time_embedding_dim": None, "time_embedding_type": "positional", "timestep_post_act": None, "up_block_types": [ "SimpleCrossAttnUpBlock2D", "SimpleCrossAttnUpBlock2D", "SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D", ], "upcast_attention": False, "use_linear_projection": False, } def inpaint_unet_model_from_original_config(): model = UNet2DConditionModel(**INPAINT_UNET_CONFIG) return model def inpaint_unet_original_checkpoint_to_diffusers_checkpoint(model, checkpoint): diffusers_checkpoint = {} num_head_channels = INPAINT_UNET_CONFIG["attention_head_dim"] diffusers_checkpoint.update(unet_time_embeddings(checkpoint)) diffusers_checkpoint.update(unet_conv_in(checkpoint)) diffusers_checkpoint.update(unet_add_embedding(checkpoint)) diffusers_checkpoint.update(unet_encoder_hid_proj(checkpoint)) # <original>.input_blocks -> <diffusers>.down_blocks original_down_block_idx = 1 for diffusers_down_block_idx in range(len(model.down_blocks)): checkpoint_update, num_original_down_blocks = unet_downblock_to_diffusers_checkpoint( model, checkpoint, diffusers_down_block_idx=diffusers_down_block_idx, original_down_block_idx=original_down_block_idx, num_head_channels=num_head_channels, ) original_down_block_idx += num_original_down_blocks diffusers_checkpoint.update(checkpoint_update) # done <original>.input_blocks -> <diffusers>.down_blocks diffusers_checkpoint.update( unet_midblock_to_diffusers_checkpoint( model, checkpoint, num_head_channels=num_head_channels, ) ) # <original>.output_blocks -> <diffusers>.up_blocks original_up_block_idx = 0 for diffusers_up_block_idx in range(len(model.up_blocks)): checkpoint_update, num_original_up_blocks = unet_upblock_to_diffusers_checkpoint( model, checkpoint, diffusers_up_block_idx=diffusers_up_block_idx, original_up_block_idx=original_up_block_idx, num_head_channels=num_head_channels, ) original_up_block_idx += num_original_up_blocks diffusers_checkpoint.update(checkpoint_update) # done <original>.output_blocks -> <diffusers>.up_blocks diffusers_checkpoint.update(unet_conv_norm_out(checkpoint)) diffusers_checkpoint.update(unet_conv_out(checkpoint)) return diffusers_checkpoint # done inpaint unet # unet utils # <original>.time_embed -> <diffusers>.time_embedding def unet_time_embeddings(checkpoint): 
diffusers_checkpoint = {} diffusers_checkpoint.update( { "time_embedding.linear_1.weight": checkpoint["time_embed.0.weight"], "time_embedding.linear_1.bias": checkpoint["time_embed.0.bias"], "time_embedding.linear_2.weight": checkpoint["time_embed.2.weight"], "time_embedding.linear_2.bias": checkpoint["time_embed.2.bias"], } ) return diffusers_checkpoint # <original>.input_blocks.0 -> <diffusers>.conv_in def unet_conv_in(checkpoint): diffusers_checkpoint = {} diffusers_checkpoint.update( { "conv_in.weight": checkpoint["input_blocks.0.0.weight"], "conv_in.bias": checkpoint["input_blocks.0.0.bias"], } ) return diffusers_checkpoint def unet_add_embedding(checkpoint): diffusers_checkpoint = {} diffusers_checkpoint.update( { "add_embedding.text_norm.weight": checkpoint["ln_model_n.weight"], "add_embedding.text_norm.bias": checkpoint["ln_model_n.bias"], "add_embedding.text_proj.weight": checkpoint["proj_n.weight"], "add_embedding.text_proj.bias": checkpoint["proj_n.bias"], "add_embedding.image_proj.weight": checkpoint["img_layer.weight"], "add_embedding.image_proj.bias": checkpoint["img_layer.bias"], } ) return diffusers_checkpoint def unet_encoder_hid_proj(checkpoint): diffusers_checkpoint = {} diffusers_checkpoint.update( { "encoder_hid_proj.image_embeds.weight": checkpoint["clip_to_seq.weight"], "encoder_hid_proj.image_embeds.bias": checkpoint["clip_to_seq.bias"], "encoder_hid_proj.text_proj.weight": checkpoint["to_model_dim_n.weight"], "encoder_hid_proj.text_proj.bias": checkpoint["to_model_dim_n.bias"], } ) return diffusers_checkpoint # <original>.out.0 -> <diffusers>.conv_norm_out def unet_conv_norm_out(checkpoint): diffusers_checkpoint = {} diffusers_checkpoint.update( { "conv_norm_out.weight": checkpoint["out.0.weight"], "conv_norm_out.bias": checkpoint["out.0.bias"], } ) return diffusers_checkpoint # <original>.out.2 -> <diffusers>.conv_out def unet_conv_out(checkpoint): diffusers_checkpoint = {} diffusers_checkpoint.update( { "conv_out.weight": checkpoint["out.2.weight"], "conv_out.bias": checkpoint["out.2.bias"], } ) return diffusers_checkpoint # <original>.input_blocks -> <diffusers>.down_blocks def unet_downblock_to_diffusers_checkpoint( model, checkpoint, *, diffusers_down_block_idx, original_down_block_idx, num_head_channels ): diffusers_checkpoint = {} diffusers_resnet_prefix = f"down_blocks.{diffusers_down_block_idx}.resnets" original_down_block_prefix = "input_blocks" down_block = model.down_blocks[diffusers_down_block_idx] num_resnets = len(down_block.resnets) if down_block.downsamplers is None: downsampler = False else: assert len(down_block.downsamplers) == 1 downsampler = True # The downsample block is also a resnet num_resnets += 1 for resnet_idx_inc in range(num_resnets): full_resnet_prefix = f"{original_down_block_prefix}.{original_down_block_idx + resnet_idx_inc}.0" if downsampler and resnet_idx_inc == num_resnets - 1: # this is a downsample block full_diffusers_resnet_prefix = f"down_blocks.{diffusers_down_block_idx}.downsamplers.0" else: # this is a regular resnet block full_diffusers_resnet_prefix = f"{diffusers_resnet_prefix}.{resnet_idx_inc}" diffusers_checkpoint.update( resnet_to_diffusers_checkpoint( checkpoint, resnet_prefix=full_resnet_prefix, diffusers_resnet_prefix=full_diffusers_resnet_prefix ) ) if hasattr(down_block, "attentions"): num_attentions = len(down_block.attentions) diffusers_attention_prefix = f"down_blocks.{diffusers_down_block_idx}.attentions" for attention_idx_inc in range(num_attentions): full_attention_prefix = 
f"{original_down_block_prefix}.{original_down_block_idx + attention_idx_inc}.1" full_diffusers_attention_prefix = f"{diffusers_attention_prefix}.{attention_idx_inc}" diffusers_checkpoint.update( attention_to_diffusers_checkpoint( checkpoint, attention_prefix=full_attention_prefix, diffusers_attention_prefix=full_diffusers_attention_prefix, num_head_channels=num_head_channels, ) ) num_original_down_blocks = num_resnets return diffusers_checkpoint, num_original_down_blocks # <original>.middle_block -> <diffusers>.mid_block def unet_midblock_to_diffusers_checkpoint(model, checkpoint, *, num_head_channels): diffusers_checkpoint = {} # block 0 original_block_idx = 0 diffusers_checkpoint.update( resnet_to_diffusers_checkpoint( checkpoint, diffusers_resnet_prefix="mid_block.resnets.0", resnet_prefix=f"middle_block.{original_block_idx}", ) ) original_block_idx += 1 # optional block 1 if hasattr(model.mid_block, "attentions") and model.mid_block.attentions[0] is not None: diffusers_checkpoint.update( attention_to_diffusers_checkpoint( checkpoint, diffusers_attention_prefix="mid_block.attentions.0", attention_prefix=f"middle_block.{original_block_idx}", num_head_channels=num_head_channels, ) ) original_block_idx += 1 # block 1 or block 2 diffusers_checkpoint.update( resnet_to_diffusers_checkpoint( checkpoint, diffusers_resnet_prefix="mid_block.resnets.1", resnet_prefix=f"middle_block.{original_block_idx}", ) ) return diffusers_checkpoint # <original>.output_blocks -> <diffusers>.up_blocks def unet_upblock_to_diffusers_checkpoint( model, checkpoint, *, diffusers_up_block_idx, original_up_block_idx, num_head_channels ): diffusers_checkpoint = {} diffusers_resnet_prefix = f"up_blocks.{diffusers_up_block_idx}.resnets" original_up_block_prefix = "output_blocks" up_block = model.up_blocks[diffusers_up_block_idx] num_resnets = len(up_block.resnets) if up_block.upsamplers is None: upsampler = False else: assert len(up_block.upsamplers) == 1 upsampler = True # The upsample block is also a resnet num_resnets += 1 has_attentions = hasattr(up_block, "attentions") for resnet_idx_inc in range(num_resnets): if upsampler and resnet_idx_inc == num_resnets - 1: # this is an upsample block if has_attentions: # There is a middle attention block that we skip original_resnet_block_idx = 2 else: original_resnet_block_idx = 1 # we add the `minus 1` because the last two resnets are stuck together in the same output block full_resnet_prefix = ( f"{original_up_block_prefix}.{original_up_block_idx + resnet_idx_inc - 1}.{original_resnet_block_idx}" ) full_diffusers_resnet_prefix = f"up_blocks.{diffusers_up_block_idx}.upsamplers.0" else: # this is a regular resnet block full_resnet_prefix = f"{original_up_block_prefix}.{original_up_block_idx + resnet_idx_inc}.0" full_diffusers_resnet_prefix = f"{diffusers_resnet_prefix}.{resnet_idx_inc}" diffusers_checkpoint.update( resnet_to_diffusers_checkpoint( checkpoint, resnet_prefix=full_resnet_prefix, diffusers_resnet_prefix=full_diffusers_resnet_prefix ) ) if has_attentions: num_attentions = len(up_block.attentions) diffusers_attention_prefix = f"up_blocks.{diffusers_up_block_idx}.attentions" for attention_idx_inc in range(num_attentions): full_attention_prefix = f"{original_up_block_prefix}.{original_up_block_idx + attention_idx_inc}.1" full_diffusers_attention_prefix = f"{diffusers_attention_prefix}.{attention_idx_inc}" diffusers_checkpoint.update( attention_to_diffusers_checkpoint( checkpoint, attention_prefix=full_attention_prefix, 
diffusers_attention_prefix=full_diffusers_attention_prefix, num_head_channels=num_head_channels, ) ) num_original_down_blocks = num_resnets - 1 if upsampler else num_resnets return diffusers_checkpoint, num_original_down_blocks def resnet_to_diffusers_checkpoint(checkpoint, *, diffusers_resnet_prefix, resnet_prefix): diffusers_checkpoint = { f"{diffusers_resnet_prefix}.norm1.weight": checkpoint[f"{resnet_prefix}.in_layers.0.weight"], f"{diffusers_resnet_prefix}.norm1.bias": checkpoint[f"{resnet_prefix}.in_layers.0.bias"], f"{diffusers_resnet_prefix}.conv1.weight": checkpoint[f"{resnet_prefix}.in_layers.2.weight"], f"{diffusers_resnet_prefix}.conv1.bias": checkpoint[f"{resnet_prefix}.in_layers.2.bias"], f"{diffusers_resnet_prefix}.time_emb_proj.weight": checkpoint[f"{resnet_prefix}.emb_layers.1.weight"], f"{diffusers_resnet_prefix}.time_emb_proj.bias": checkpoint[f"{resnet_prefix}.emb_layers.1.bias"], f"{diffusers_resnet_prefix}.norm2.weight": checkpoint[f"{resnet_prefix}.out_layers.0.weight"], f"{diffusers_resnet_prefix}.norm2.bias": checkpoint[f"{resnet_prefix}.out_layers.0.bias"], f"{diffusers_resnet_prefix}.conv2.weight": checkpoint[f"{resnet_prefix}.out_layers.3.weight"], f"{diffusers_resnet_prefix}.conv2.bias": checkpoint[f"{resnet_prefix}.out_layers.3.bias"], } skip_connection_prefix = f"{resnet_prefix}.skip_connection" if f"{skip_connection_prefix}.weight" in checkpoint: diffusers_checkpoint.update( { f"{diffusers_resnet_prefix}.conv_shortcut.weight": checkpoint[f"{skip_connection_prefix}.weight"], f"{diffusers_resnet_prefix}.conv_shortcut.bias": checkpoint[f"{skip_connection_prefix}.bias"], } ) return diffusers_checkpoint def attention_to_diffusers_checkpoint(checkpoint, *, diffusers_attention_prefix, attention_prefix, num_head_channels): diffusers_checkpoint = {} # <original>.norm -> <diffusers>.group_norm diffusers_checkpoint.update( { f"{diffusers_attention_prefix}.group_norm.weight": checkpoint[f"{attention_prefix}.norm.weight"], f"{diffusers_attention_prefix}.group_norm.bias": checkpoint[f"{attention_prefix}.norm.bias"], } ) # <original>.qkv -> <diffusers>.{query, key, value} [q_weight, k_weight, v_weight], [q_bias, k_bias, v_bias] = split_attentions( weight=checkpoint[f"{attention_prefix}.qkv.weight"][:, :, 0], bias=checkpoint[f"{attention_prefix}.qkv.bias"], split=3, chunk_size=num_head_channels, ) diffusers_checkpoint.update( { f"{diffusers_attention_prefix}.to_q.weight": q_weight, f"{diffusers_attention_prefix}.to_q.bias": q_bias, f"{diffusers_attention_prefix}.to_k.weight": k_weight, f"{diffusers_attention_prefix}.to_k.bias": k_bias, f"{diffusers_attention_prefix}.to_v.weight": v_weight, f"{diffusers_attention_prefix}.to_v.bias": v_bias, } ) # <original>.encoder_kv -> <diffusers>.{context_key, context_value} [encoder_k_weight, encoder_v_weight], [encoder_k_bias, encoder_v_bias] = split_attentions( weight=checkpoint[f"{attention_prefix}.encoder_kv.weight"][:, :, 0], bias=checkpoint[f"{attention_prefix}.encoder_kv.bias"], split=2, chunk_size=num_head_channels, ) diffusers_checkpoint.update( { f"{diffusers_attention_prefix}.add_k_proj.weight": encoder_k_weight, f"{diffusers_attention_prefix}.add_k_proj.bias": encoder_k_bias, f"{diffusers_attention_prefix}.add_v_proj.weight": encoder_v_weight, f"{diffusers_attention_prefix}.add_v_proj.bias": encoder_v_bias, } ) # <original>.proj_out (1d conv) -> <diffusers>.proj_attn (linear) diffusers_checkpoint.update( { f"{diffusers_attention_prefix}.to_out.0.weight": checkpoint[f"{attention_prefix}.proj_out.weight"][ :, :, 0 ], 
f"{diffusers_attention_prefix}.to_out.0.bias": checkpoint[f"{attention_prefix}.proj_out.bias"], } ) return diffusers_checkpoint # TODO maybe document and/or can do more efficiently (build indices in for loop and extract once for each split?) def split_attentions(*, weight, bias, split, chunk_size): weights = [None] * split biases = [None] * split weights_biases_idx = 0 for starting_row_index in range(0, weight.shape[0], chunk_size): row_indices = torch.arange(starting_row_index, starting_row_index + chunk_size) weight_rows = weight[row_indices, :] bias_rows = bias[row_indices] if weights[weights_biases_idx] is None: assert weights[weights_biases_idx] is None weights[weights_biases_idx] = weight_rows biases[weights_biases_idx] = bias_rows else: assert weights[weights_biases_idx] is not None weights[weights_biases_idx] = torch.concat([weights[weights_biases_idx], weight_rows]) biases[weights_biases_idx] = torch.concat([biases[weights_biases_idx], bias_rows]) weights_biases_idx = (weights_biases_idx + 1) % split return weights, biases # done unet utils def prior(*, args, checkpoint_map_location): print("loading prior") prior_checkpoint = torch.load(args.prior_checkpoint_path, map_location=checkpoint_map_location) clip_stats_checkpoint = torch.load(args.clip_stat_path, map_location=checkpoint_map_location) prior_model = prior_model_from_original_config() prior_diffusers_checkpoint = prior_original_checkpoint_to_diffusers_checkpoint( prior_model, prior_checkpoint, clip_stats_checkpoint ) del prior_checkpoint del clip_stats_checkpoint load_checkpoint_to_model(prior_diffusers_checkpoint, prior_model, strict=True) print("done loading prior") return prior_model def text2img(*, args, checkpoint_map_location): print("loading text2img") text2img_checkpoint = torch.load(args.text2img_checkpoint_path, map_location=checkpoint_map_location) unet_model = unet_model_from_original_config() unet_diffusers_checkpoint = unet_original_checkpoint_to_diffusers_checkpoint(unet_model, text2img_checkpoint) del text2img_checkpoint load_checkpoint_to_model(unet_diffusers_checkpoint, unet_model, strict=True) print("done loading text2img") return unet_model def inpaint_text2img(*, args, checkpoint_map_location): print("loading inpaint text2img") inpaint_text2img_checkpoint = torch.load( args.inpaint_text2img_checkpoint_path, map_location=checkpoint_map_location ) inpaint_unet_model = inpaint_unet_model_from_original_config() inpaint_unet_diffusers_checkpoint = inpaint_unet_original_checkpoint_to_diffusers_checkpoint( inpaint_unet_model, inpaint_text2img_checkpoint ) del inpaint_text2img_checkpoint load_checkpoint_to_model(inpaint_unet_diffusers_checkpoint, inpaint_unet_model, strict=True) print("done loading inpaint text2img") return inpaint_unet_model # movq MOVQ_CONFIG = { "in_channels": 3, "out_channels": 3, "latent_channels": 4, "down_block_types": ("DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", "AttnDownEncoderBlock2D"), "up_block_types": ("AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"), "num_vq_embeddings": 16384, "block_out_channels": (128, 256, 256, 512), "vq_embed_dim": 4, "layers_per_block": 2, "norm_type": "spatial", } def movq_model_from_original_config(): movq = VQModel(**MOVQ_CONFIG) return movq def movq_encoder_to_diffusers_checkpoint(model, checkpoint): diffusers_checkpoint = {} # conv_in diffusers_checkpoint.update( { "encoder.conv_in.weight": checkpoint["encoder.conv_in.weight"], "encoder.conv_in.bias": checkpoint["encoder.conv_in.bias"], } ) # 
down_blocks for down_block_idx, down_block in enumerate(model.encoder.down_blocks): diffusers_down_block_prefix = f"encoder.down_blocks.{down_block_idx}" down_block_prefix = f"encoder.down.{down_block_idx}" # resnets for resnet_idx, resnet in enumerate(down_block.resnets): diffusers_resnet_prefix = f"{diffusers_down_block_prefix}.resnets.{resnet_idx}" resnet_prefix = f"{down_block_prefix}.block.{resnet_idx}" diffusers_checkpoint.update( movq_resnet_to_diffusers_checkpoint( resnet, checkpoint, diffusers_resnet_prefix=diffusers_resnet_prefix, resnet_prefix=resnet_prefix ) ) # downsample # do not include the downsample when on the last down block # There is no downsample on the last down block if down_block_idx != len(model.encoder.down_blocks) - 1: # There's a single downsample in the original checkpoint but a list of downsamples # in the diffusers model. diffusers_downsample_prefix = f"{diffusers_down_block_prefix}.downsamplers.0.conv" downsample_prefix = f"{down_block_prefix}.downsample.conv" diffusers_checkpoint.update( { f"{diffusers_downsample_prefix}.weight": checkpoint[f"{downsample_prefix}.weight"], f"{diffusers_downsample_prefix}.bias": checkpoint[f"{downsample_prefix}.bias"], } ) # attentions if hasattr(down_block, "attentions"): for attention_idx, _ in enumerate(down_block.attentions): diffusers_attention_prefix = f"{diffusers_down_block_prefix}.attentions.{attention_idx}" attention_prefix = f"{down_block_prefix}.attn.{attention_idx}" diffusers_checkpoint.update( movq_attention_to_diffusers_checkpoint( checkpoint, diffusers_attention_prefix=diffusers_attention_prefix, attention_prefix=attention_prefix, ) ) # mid block # mid block attentions # There is a single hardcoded attention block in the middle of the VQ-diffusion encoder diffusers_attention_prefix = "encoder.mid_block.attentions.0" attention_prefix = "encoder.mid.attn_1" diffusers_checkpoint.update( movq_attention_to_diffusers_checkpoint( checkpoint, diffusers_attention_prefix=diffusers_attention_prefix, attention_prefix=attention_prefix ) ) # mid block resnets for diffusers_resnet_idx, resnet in enumerate(model.encoder.mid_block.resnets): diffusers_resnet_prefix = f"encoder.mid_block.resnets.{diffusers_resnet_idx}" # the hardcoded prefixes to `block_` are 1 and 2 orig_resnet_idx = diffusers_resnet_idx + 1 # There are two hardcoded resnets in the middle of the VQ-diffusion encoder resnet_prefix = f"encoder.mid.block_{orig_resnet_idx}" diffusers_checkpoint.update( movq_resnet_to_diffusers_checkpoint( resnet, checkpoint, diffusers_resnet_prefix=diffusers_resnet_prefix, resnet_prefix=resnet_prefix ) ) diffusers_checkpoint.update( { # conv_norm_out "encoder.conv_norm_out.weight": checkpoint["encoder.norm_out.weight"], "encoder.conv_norm_out.bias": checkpoint["encoder.norm_out.bias"], # conv_out "encoder.conv_out.weight": checkpoint["encoder.conv_out.weight"], "encoder.conv_out.bias": checkpoint["encoder.conv_out.bias"], } ) return diffusers_checkpoint def movq_decoder_to_diffusers_checkpoint(model, checkpoint): diffusers_checkpoint = {} # conv in diffusers_checkpoint.update( { "decoder.conv_in.weight": checkpoint["decoder.conv_in.weight"], "decoder.conv_in.bias": checkpoint["decoder.conv_in.bias"], } ) # up_blocks for diffusers_up_block_idx, up_block in enumerate(model.decoder.up_blocks): # up_blocks are stored in reverse order in the VQ-diffusion checkpoint orig_up_block_idx = len(model.decoder.up_blocks) - 1 - diffusers_up_block_idx diffusers_up_block_prefix = f"decoder.up_blocks.{diffusers_up_block_idx}" up_block_prefix = 
f"decoder.up.{orig_up_block_idx}" # resnets for resnet_idx, resnet in enumerate(up_block.resnets): diffusers_resnet_prefix = f"{diffusers_up_block_prefix}.resnets.{resnet_idx}" resnet_prefix = f"{up_block_prefix}.block.{resnet_idx}" diffusers_checkpoint.update( movq_resnet_to_diffusers_checkpoint_spatial_norm( resnet, checkpoint, diffusers_resnet_prefix=diffusers_resnet_prefix, resnet_prefix=resnet_prefix ) ) # upsample # there is no up sample on the last up block if diffusers_up_block_idx != len(model.decoder.up_blocks) - 1: # There's a single upsample in the VQ-diffusion checkpoint but a list of downsamples # in the diffusers model. diffusers_downsample_prefix = f"{diffusers_up_block_prefix}.upsamplers.0.conv" downsample_prefix = f"{up_block_prefix}.upsample.conv" diffusers_checkpoint.update( { f"{diffusers_downsample_prefix}.weight": checkpoint[f"{downsample_prefix}.weight"], f"{diffusers_downsample_prefix}.bias": checkpoint[f"{downsample_prefix}.bias"], } ) # attentions if hasattr(up_block, "attentions"): for attention_idx, _ in enumerate(up_block.attentions): diffusers_attention_prefix = f"{diffusers_up_block_prefix}.attentions.{attention_idx}" attention_prefix = f"{up_block_prefix}.attn.{attention_idx}" diffusers_checkpoint.update( movq_attention_to_diffusers_checkpoint_spatial_norm( checkpoint, diffusers_attention_prefix=diffusers_attention_prefix, attention_prefix=attention_prefix, ) ) # mid block # mid block attentions # There is a single hardcoded attention block in the middle of the VQ-diffusion decoder diffusers_attention_prefix = "decoder.mid_block.attentions.0" attention_prefix = "decoder.mid.attn_1" diffusers_checkpoint.update( movq_attention_to_diffusers_checkpoint_spatial_norm( checkpoint, diffusers_attention_prefix=diffusers_attention_prefix, attention_prefix=attention_prefix ) ) # mid block resnets for diffusers_resnet_idx, resnet in enumerate(model.encoder.mid_block.resnets): diffusers_resnet_prefix = f"decoder.mid_block.resnets.{diffusers_resnet_idx}" # the hardcoded prefixes to `block_` are 1 and 2 orig_resnet_idx = diffusers_resnet_idx + 1 # There are two hardcoded resnets in the middle of the VQ-diffusion decoder resnet_prefix = f"decoder.mid.block_{orig_resnet_idx}" diffusers_checkpoint.update( movq_resnet_to_diffusers_checkpoint_spatial_norm( resnet, checkpoint, diffusers_resnet_prefix=diffusers_resnet_prefix, resnet_prefix=resnet_prefix ) ) diffusers_checkpoint.update( { # conv_norm_out "decoder.conv_norm_out.norm_layer.weight": checkpoint["decoder.norm_out.norm_layer.weight"], "decoder.conv_norm_out.norm_layer.bias": checkpoint["decoder.norm_out.norm_layer.bias"], "decoder.conv_norm_out.conv_y.weight": checkpoint["decoder.norm_out.conv_y.weight"], "decoder.conv_norm_out.conv_y.bias": checkpoint["decoder.norm_out.conv_y.bias"], "decoder.conv_norm_out.conv_b.weight": checkpoint["decoder.norm_out.conv_b.weight"], "decoder.conv_norm_out.conv_b.bias": checkpoint["decoder.norm_out.conv_b.bias"], # conv_out "decoder.conv_out.weight": checkpoint["decoder.conv_out.weight"], "decoder.conv_out.bias": checkpoint["decoder.conv_out.bias"], } ) return diffusers_checkpoint def movq_resnet_to_diffusers_checkpoint(resnet, checkpoint, *, diffusers_resnet_prefix, resnet_prefix): rv = { # norm1 f"{diffusers_resnet_prefix}.norm1.weight": checkpoint[f"{resnet_prefix}.norm1.weight"], f"{diffusers_resnet_prefix}.norm1.bias": checkpoint[f"{resnet_prefix}.norm1.bias"], # conv1 f"{diffusers_resnet_prefix}.conv1.weight": checkpoint[f"{resnet_prefix}.conv1.weight"], 
f"{diffusers_resnet_prefix}.conv1.bias": checkpoint[f"{resnet_prefix}.conv1.bias"], # norm2 f"{diffusers_resnet_prefix}.norm2.weight": checkpoint[f"{resnet_prefix}.norm2.weight"], f"{diffusers_resnet_prefix}.norm2.bias": checkpoint[f"{resnet_prefix}.norm2.bias"], # conv2 f"{diffusers_resnet_prefix}.conv2.weight": checkpoint[f"{resnet_prefix}.conv2.weight"], f"{diffusers_resnet_prefix}.conv2.bias": checkpoint[f"{resnet_prefix}.conv2.bias"], } if resnet.conv_shortcut is not None: rv.update( { f"{diffusers_resnet_prefix}.conv_shortcut.weight": checkpoint[f"{resnet_prefix}.nin_shortcut.weight"], f"{diffusers_resnet_prefix}.conv_shortcut.bias": checkpoint[f"{resnet_prefix}.nin_shortcut.bias"], } ) return rv def movq_resnet_to_diffusers_checkpoint_spatial_norm(resnet, checkpoint, *, diffusers_resnet_prefix, resnet_prefix): rv = { # norm1 f"{diffusers_resnet_prefix}.norm1.norm_layer.weight": checkpoint[f"{resnet_prefix}.norm1.norm_layer.weight"], f"{diffusers_resnet_prefix}.norm1.norm_layer.bias": checkpoint[f"{resnet_prefix}.norm1.norm_layer.bias"], f"{diffusers_resnet_prefix}.norm1.conv_y.weight": checkpoint[f"{resnet_prefix}.norm1.conv_y.weight"], f"{diffusers_resnet_prefix}.norm1.conv_y.bias": checkpoint[f"{resnet_prefix}.norm1.conv_y.bias"], f"{diffusers_resnet_prefix}.norm1.conv_b.weight": checkpoint[f"{resnet_prefix}.norm1.conv_b.weight"], f"{diffusers_resnet_prefix}.norm1.conv_b.bias": checkpoint[f"{resnet_prefix}.norm1.conv_b.bias"], # conv1 f"{diffusers_resnet_prefix}.conv1.weight": checkpoint[f"{resnet_prefix}.conv1.weight"], f"{diffusers_resnet_prefix}.conv1.bias": checkpoint[f"{resnet_prefix}.conv1.bias"], # norm2 f"{diffusers_resnet_prefix}.norm2.norm_layer.weight": checkpoint[f"{resnet_prefix}.norm2.norm_layer.weight"], f"{diffusers_resnet_prefix}.norm2.norm_layer.bias": checkpoint[f"{resnet_prefix}.norm2.norm_layer.bias"], f"{diffusers_resnet_prefix}.norm2.conv_y.weight": checkpoint[f"{resnet_prefix}.norm2.conv_y.weight"], f"{diffusers_resnet_prefix}.norm2.conv_y.bias": checkpoint[f"{resnet_prefix}.norm2.conv_y.bias"], f"{diffusers_resnet_prefix}.norm2.conv_b.weight": checkpoint[f"{resnet_prefix}.norm2.conv_b.weight"], f"{diffusers_resnet_prefix}.norm2.conv_b.bias": checkpoint[f"{resnet_prefix}.norm2.conv_b.bias"], # conv2 f"{diffusers_resnet_prefix}.conv2.weight": checkpoint[f"{resnet_prefix}.conv2.weight"], f"{diffusers_resnet_prefix}.conv2.bias": checkpoint[f"{resnet_prefix}.conv2.bias"], } if resnet.conv_shortcut is not None: rv.update( { f"{diffusers_resnet_prefix}.conv_shortcut.weight": checkpoint[f"{resnet_prefix}.nin_shortcut.weight"], f"{diffusers_resnet_prefix}.conv_shortcut.bias": checkpoint[f"{resnet_prefix}.nin_shortcut.bias"], } ) return rv def movq_attention_to_diffusers_checkpoint(checkpoint, *, diffusers_attention_prefix, attention_prefix): return { # norm f"{diffusers_attention_prefix}.group_norm.weight": checkpoint[f"{attention_prefix}.norm.weight"], f"{diffusers_attention_prefix}.group_norm.bias": checkpoint[f"{attention_prefix}.norm.bias"], # query f"{diffusers_attention_prefix}.to_q.weight": checkpoint[f"{attention_prefix}.q.weight"][:, :, 0, 0], f"{diffusers_attention_prefix}.to_q.bias": checkpoint[f"{attention_prefix}.q.bias"], # key f"{diffusers_attention_prefix}.to_k.weight": checkpoint[f"{attention_prefix}.k.weight"][:, :, 0, 0], f"{diffusers_attention_prefix}.to_k.bias": checkpoint[f"{attention_prefix}.k.bias"], # value f"{diffusers_attention_prefix}.to_v.weight": checkpoint[f"{attention_prefix}.v.weight"][:, :, 0, 0], 
f"{diffusers_attention_prefix}.to_v.bias": checkpoint[f"{attention_prefix}.v.bias"], # proj_attn f"{diffusers_attention_prefix}.to_out.0.weight": checkpoint[f"{attention_prefix}.proj_out.weight"][:, :, 0, 0], f"{diffusers_attention_prefix}.to_out.0.bias": checkpoint[f"{attention_prefix}.proj_out.bias"], } def movq_attention_to_diffusers_checkpoint_spatial_norm(checkpoint, *, diffusers_attention_prefix, attention_prefix): return { # norm f"{diffusers_attention_prefix}.spatial_norm.norm_layer.weight": checkpoint[ f"{attention_prefix}.norm.norm_layer.weight" ], f"{diffusers_attention_prefix}.spatial_norm.norm_layer.bias": checkpoint[ f"{attention_prefix}.norm.norm_layer.bias" ], f"{diffusers_attention_prefix}.spatial_norm.conv_y.weight": checkpoint[ f"{attention_prefix}.norm.conv_y.weight" ], f"{diffusers_attention_prefix}.spatial_norm.conv_y.bias": checkpoint[f"{attention_prefix}.norm.conv_y.bias"], f"{diffusers_attention_prefix}.spatial_norm.conv_b.weight": checkpoint[ f"{attention_prefix}.norm.conv_b.weight" ], f"{diffusers_attention_prefix}.spatial_norm.conv_b.bias": checkpoint[f"{attention_prefix}.norm.conv_b.bias"], # query f"{diffusers_attention_prefix}.to_q.weight": checkpoint[f"{attention_prefix}.q.weight"][:, :, 0, 0], f"{diffusers_attention_prefix}.to_q.bias": checkpoint[f"{attention_prefix}.q.bias"], # key f"{diffusers_attention_prefix}.to_k.weight": checkpoint[f"{attention_prefix}.k.weight"][:, :, 0, 0], f"{diffusers_attention_prefix}.to_k.bias": checkpoint[f"{attention_prefix}.k.bias"], # value f"{diffusers_attention_prefix}.to_v.weight": checkpoint[f"{attention_prefix}.v.weight"][:, :, 0, 0], f"{diffusers_attention_prefix}.to_v.bias": checkpoint[f"{attention_prefix}.v.bias"], # proj_attn f"{diffusers_attention_prefix}.to_out.0.weight": checkpoint[f"{attention_prefix}.proj_out.weight"][:, :, 0, 0], f"{diffusers_attention_prefix}.to_out.0.bias": checkpoint[f"{attention_prefix}.proj_out.bias"], } def movq_original_checkpoint_to_diffusers_checkpoint(model, checkpoint): diffusers_checkpoint = {} diffusers_checkpoint.update(movq_encoder_to_diffusers_checkpoint(model, checkpoint)) # quant_conv diffusers_checkpoint.update( { "quant_conv.weight": checkpoint["quant_conv.weight"], "quant_conv.bias": checkpoint["quant_conv.bias"], } ) # quantize diffusers_checkpoint.update({"quantize.embedding.weight": checkpoint["quantize.embedding.weight"]}) # post_quant_conv diffusers_checkpoint.update( { "post_quant_conv.weight": checkpoint["post_quant_conv.weight"], "post_quant_conv.bias": checkpoint["post_quant_conv.bias"], } ) # decoder diffusers_checkpoint.update(movq_decoder_to_diffusers_checkpoint(model, checkpoint)) return diffusers_checkpoint def movq(*, args, checkpoint_map_location): print("loading movq") movq_checkpoint = torch.load(args.movq_checkpoint_path, map_location=checkpoint_map_location) movq_model = movq_model_from_original_config() movq_diffusers_checkpoint = movq_original_checkpoint_to_diffusers_checkpoint(movq_model, movq_checkpoint) del movq_checkpoint load_checkpoint_to_model(movq_diffusers_checkpoint, movq_model, strict=True) print("done loading movq") return movq_model def load_checkpoint_to_model(checkpoint, model, strict=False): with tempfile.NamedTemporaryFile(delete=False) as file: torch.save(checkpoint, file.name) del checkpoint if strict: model.load_state_dict(torch.load(file.name), strict=True) else: load_checkpoint_and_dispatch(model, file.name, device_map="auto") os.remove(file.name) if __name__ == "__main__": parser = argparse.ArgumentParser() 
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.") parser.add_argument( "--prior_checkpoint_path", default=None, type=str, required=False, help="Path to the prior checkpoint to convert.", ) parser.add_argument( "--clip_stat_path", default=None, type=str, required=False, help="Path to the clip stats checkpoint to convert.", ) parser.add_argument( "--text2img_checkpoint_path", default=None, type=str, required=False, help="Path to the text2img checkpoint to convert.", ) parser.add_argument( "--movq_checkpoint_path", default=None, type=str, required=False, help="Path to the text2img checkpoint to convert.", ) parser.add_argument( "--inpaint_text2img_checkpoint_path", default=None, type=str, required=False, help="Path to the inpaint text2img checkpoint to convert.", ) parser.add_argument( "--checkpoint_load_device", default="cpu", type=str, required=False, help="The device passed to `map_location` when loading checkpoints.", ) parser.add_argument( "--debug", default=None, type=str, required=False, help="Only run a specific stage of the convert script. Used for debugging", ) args = parser.parse_args() print(f"loading checkpoints to {args.checkpoint_load_device}") checkpoint_map_location = torch.device(args.checkpoint_load_device) if args.debug is not None: print(f"debug: only executing {args.debug}") if args.debug is None: print("to-do") elif args.debug == "prior": prior_model = prior(args=args, checkpoint_map_location=checkpoint_map_location) prior_model.save_pretrained(args.dump_path) elif args.debug == "text2img": unet_model = text2img(args=args, checkpoint_map_location=checkpoint_map_location) unet_model.save_pretrained(f"{args.dump_path}/unet") elif args.debug == "inpaint_text2img": inpaint_unet_model = inpaint_text2img(args=args, checkpoint_map_location=checkpoint_map_location) inpaint_unet_model.save_pretrained(f"{args.dump_path}/inpaint_unet") elif args.debug == "decoder": decoder = movq(args=args, checkpoint_map_location=checkpoint_map_location) decoder.save_pretrained(f"{args.dump_path}/decoder") else: raise ValueError(f"unknown debug value : {args.debug}")
diffusers/scripts/convert_kandinsky_to_diffusers.py/0
{ "file_path": "diffusers/scripts/convert_kandinsky_to_diffusers.py", "repo_id": "diffusers", "token_count": 23602 }
132
import argparse from contextlib import nullcontext import safetensors.torch import torch from accelerate import init_empty_weights from diffusers import AutoencoderKL, SD3Transformer2DModel from diffusers.loaders.single_file_utils import convert_ldm_vae_checkpoint from diffusers.models.modeling_utils import load_model_dict_into_meta from diffusers.utils.import_utils import is_accelerate_available CTX = init_empty_weights if is_accelerate_available else nullcontext parser = argparse.ArgumentParser() parser.add_argument("--checkpoint_path", type=str) parser.add_argument("--output_path", type=str) parser.add_argument("--dtype", type=str, default="fp16") args = parser.parse_args() dtype = torch.float16 if args.dtype == "fp16" else torch.float32 def load_original_checkpoint(ckpt_path): original_state_dict = safetensors.torch.load_file(ckpt_path) keys = list(original_state_dict.keys()) for k in keys: if "model.diffusion_model." in k: original_state_dict[k.replace("model.diffusion_model.", "")] = original_state_dict.pop(k) return original_state_dict # in SD3 original implementation of AdaLayerNormContinuous, it split linear projection output into shift, scale; # while in diffusers it split into scale, shift. Here we swap the linear projection weights in order to be able to use diffusers implementation def swap_scale_shift(weight, dim): shift, scale = weight.chunk(2, dim=0) new_weight = torch.cat([scale, shift], dim=0) return new_weight def convert_sd3_transformer_checkpoint_to_diffusers(original_state_dict, num_layers, caption_projection_dim): converted_state_dict = {} # Positional and patch embeddings. converted_state_dict["pos_embed.pos_embed"] = original_state_dict.pop("pos_embed") converted_state_dict["pos_embed.proj.weight"] = original_state_dict.pop("x_embedder.proj.weight") converted_state_dict["pos_embed.proj.bias"] = original_state_dict.pop("x_embedder.proj.bias") # Timestep embeddings. converted_state_dict["time_text_embed.timestep_embedder.linear_1.weight"] = original_state_dict.pop( "t_embedder.mlp.0.weight" ) converted_state_dict["time_text_embed.timestep_embedder.linear_1.bias"] = original_state_dict.pop( "t_embedder.mlp.0.bias" ) converted_state_dict["time_text_embed.timestep_embedder.linear_2.weight"] = original_state_dict.pop( "t_embedder.mlp.2.weight" ) converted_state_dict["time_text_embed.timestep_embedder.linear_2.bias"] = original_state_dict.pop( "t_embedder.mlp.2.bias" ) # Context projections. converted_state_dict["context_embedder.weight"] = original_state_dict.pop("context_embedder.weight") converted_state_dict["context_embedder.bias"] = original_state_dict.pop("context_embedder.bias") # Pooled context projection. converted_state_dict["time_text_embed.text_embedder.linear_1.weight"] = original_state_dict.pop( "y_embedder.mlp.0.weight" ) converted_state_dict["time_text_embed.text_embedder.linear_1.bias"] = original_state_dict.pop( "y_embedder.mlp.0.bias" ) converted_state_dict["time_text_embed.text_embedder.linear_2.weight"] = original_state_dict.pop( "y_embedder.mlp.2.weight" ) converted_state_dict["time_text_embed.text_embedder.linear_2.bias"] = original_state_dict.pop( "y_embedder.mlp.2.bias" ) # Transformer blocks 🎸. 
for i in range(num_layers): # Q, K, V sample_q, sample_k, sample_v = torch.chunk( original_state_dict.pop(f"joint_blocks.{i}.x_block.attn.qkv.weight"), 3, dim=0 ) context_q, context_k, context_v = torch.chunk( original_state_dict.pop(f"joint_blocks.{i}.context_block.attn.qkv.weight"), 3, dim=0 ) sample_q_bias, sample_k_bias, sample_v_bias = torch.chunk( original_state_dict.pop(f"joint_blocks.{i}.x_block.attn.qkv.bias"), 3, dim=0 ) context_q_bias, context_k_bias, context_v_bias = torch.chunk( original_state_dict.pop(f"joint_blocks.{i}.context_block.attn.qkv.bias"), 3, dim=0 ) converted_state_dict[f"transformer_blocks.{i}.attn.to_q.weight"] = torch.cat([sample_q]) converted_state_dict[f"transformer_blocks.{i}.attn.to_q.bias"] = torch.cat([sample_q_bias]) converted_state_dict[f"transformer_blocks.{i}.attn.to_k.weight"] = torch.cat([sample_k]) converted_state_dict[f"transformer_blocks.{i}.attn.to_k.bias"] = torch.cat([sample_k_bias]) converted_state_dict[f"transformer_blocks.{i}.attn.to_v.weight"] = torch.cat([sample_v]) converted_state_dict[f"transformer_blocks.{i}.attn.to_v.bias"] = torch.cat([sample_v_bias]) converted_state_dict[f"transformer_blocks.{i}.attn.add_q_proj.weight"] = torch.cat([context_q]) converted_state_dict[f"transformer_blocks.{i}.attn.add_q_proj.bias"] = torch.cat([context_q_bias]) converted_state_dict[f"transformer_blocks.{i}.attn.add_k_proj.weight"] = torch.cat([context_k]) converted_state_dict[f"transformer_blocks.{i}.attn.add_k_proj.bias"] = torch.cat([context_k_bias]) converted_state_dict[f"transformer_blocks.{i}.attn.add_v_proj.weight"] = torch.cat([context_v]) converted_state_dict[f"transformer_blocks.{i}.attn.add_v_proj.bias"] = torch.cat([context_v_bias]) # output projections. converted_state_dict[f"transformer_blocks.{i}.attn.to_out.0.weight"] = original_state_dict.pop( f"joint_blocks.{i}.x_block.attn.proj.weight" ) converted_state_dict[f"transformer_blocks.{i}.attn.to_out.0.bias"] = original_state_dict.pop( f"joint_blocks.{i}.x_block.attn.proj.bias" ) if not (i == num_layers - 1): converted_state_dict[f"transformer_blocks.{i}.attn.to_add_out.weight"] = original_state_dict.pop( f"joint_blocks.{i}.context_block.attn.proj.weight" ) converted_state_dict[f"transformer_blocks.{i}.attn.to_add_out.bias"] = original_state_dict.pop( f"joint_blocks.{i}.context_block.attn.proj.bias" ) # norms. converted_state_dict[f"transformer_blocks.{i}.norm1.linear.weight"] = original_state_dict.pop( f"joint_blocks.{i}.x_block.adaLN_modulation.1.weight" ) converted_state_dict[f"transformer_blocks.{i}.norm1.linear.bias"] = original_state_dict.pop( f"joint_blocks.{i}.x_block.adaLN_modulation.1.bias" ) if not (i == num_layers - 1): converted_state_dict[f"transformer_blocks.{i}.norm1_context.linear.weight"] = original_state_dict.pop( f"joint_blocks.{i}.context_block.adaLN_modulation.1.weight" ) converted_state_dict[f"transformer_blocks.{i}.norm1_context.linear.bias"] = original_state_dict.pop( f"joint_blocks.{i}.context_block.adaLN_modulation.1.bias" ) else: converted_state_dict[f"transformer_blocks.{i}.norm1_context.linear.weight"] = swap_scale_shift( original_state_dict.pop(f"joint_blocks.{i}.context_block.adaLN_modulation.1.weight"), dim=caption_projection_dim, ) converted_state_dict[f"transformer_blocks.{i}.norm1_context.linear.bias"] = swap_scale_shift( original_state_dict.pop(f"joint_blocks.{i}.context_block.adaLN_modulation.1.bias"), dim=caption_projection_dim, ) # ffs. 
converted_state_dict[f"transformer_blocks.{i}.ff.net.0.proj.weight"] = original_state_dict.pop( f"joint_blocks.{i}.x_block.mlp.fc1.weight" ) converted_state_dict[f"transformer_blocks.{i}.ff.net.0.proj.bias"] = original_state_dict.pop( f"joint_blocks.{i}.x_block.mlp.fc1.bias" ) converted_state_dict[f"transformer_blocks.{i}.ff.net.2.weight"] = original_state_dict.pop( f"joint_blocks.{i}.x_block.mlp.fc2.weight" ) converted_state_dict[f"transformer_blocks.{i}.ff.net.2.bias"] = original_state_dict.pop( f"joint_blocks.{i}.x_block.mlp.fc2.bias" ) if not (i == num_layers - 1): converted_state_dict[f"transformer_blocks.{i}.ff_context.net.0.proj.weight"] = original_state_dict.pop( f"joint_blocks.{i}.context_block.mlp.fc1.weight" ) converted_state_dict[f"transformer_blocks.{i}.ff_context.net.0.proj.bias"] = original_state_dict.pop( f"joint_blocks.{i}.context_block.mlp.fc1.bias" ) converted_state_dict[f"transformer_blocks.{i}.ff_context.net.2.weight"] = original_state_dict.pop( f"joint_blocks.{i}.context_block.mlp.fc2.weight" ) converted_state_dict[f"transformer_blocks.{i}.ff_context.net.2.bias"] = original_state_dict.pop( f"joint_blocks.{i}.context_block.mlp.fc2.bias" ) # Final blocks. converted_state_dict["proj_out.weight"] = original_state_dict.pop("final_layer.linear.weight") converted_state_dict["proj_out.bias"] = original_state_dict.pop("final_layer.linear.bias") converted_state_dict["norm_out.linear.weight"] = swap_scale_shift( original_state_dict.pop("final_layer.adaLN_modulation.1.weight"), dim=caption_projection_dim ) converted_state_dict["norm_out.linear.bias"] = swap_scale_shift( original_state_dict.pop("final_layer.adaLN_modulation.1.bias"), dim=caption_projection_dim ) return converted_state_dict def is_vae_in_checkpoint(original_state_dict): return ("first_stage_model.decoder.conv_in.weight" in original_state_dict) and ( "first_stage_model.encoder.conv_in.weight" in original_state_dict ) def main(args): original_ckpt = load_original_checkpoint(args.checkpoint_path) num_layers = list(set(int(k.split(".", 2)[1]) for k in original_ckpt if "joint_blocks" in k))[-1] + 1 # noqa: C401 caption_projection_dim = 1536 converted_transformer_state_dict = convert_sd3_transformer_checkpoint_to_diffusers( original_ckpt, num_layers, caption_projection_dim ) with CTX(): transformer = SD3Transformer2DModel( sample_size=64, patch_size=2, in_channels=16, joint_attention_dim=4096, num_layers=num_layers, caption_projection_dim=caption_projection_dim, num_attention_heads=24, pos_embed_max_size=192, ) if is_accelerate_available(): load_model_dict_into_meta(transformer, converted_transformer_state_dict) else: transformer.load_state_dict(converted_transformer_state_dict, strict=True) print("Saving SD3 Transformer in Diffusers format.") transformer.to(dtype).save_pretrained(f"{args.output_path}/transformer") if is_vae_in_checkpoint(original_ckpt): with CTX(): vae = AutoencoderKL.from_config( "stabilityai/stable-diffusion-xl-base-1.0", subfolder="vae", latent_channels=16, use_post_quant_conv=False, use_quant_conv=False, scaling_factor=1.5305, shift_factor=0.0609, ) converted_vae_state_dict = convert_ldm_vae_checkpoint(original_ckpt, vae.config) if is_accelerate_available(): load_model_dict_into_meta(vae, converted_vae_state_dict) else: vae.load_state_dict(converted_vae_state_dict, strict=True) print("Saving SD3 Autoencoder in Diffusers format.") vae.to(dtype).save_pretrained(f"{args.output_path}/vae") if __name__ == "__main__": main(args)
diffusers/scripts/convert_sd3_to_diffusers.py/0
{ "file_path": "diffusers/scripts/convert_sd3_to_diffusers.py", "repo_id": "diffusers", "token_count": 5323 }
133
# Run inside root directory of official source code: https://github.com/dome272/wuerstchen/ import os import torch from transformers import AutoTokenizer, CLIPTextModel from vqgan import VQModel from diffusers import ( DDPMWuerstchenScheduler, WuerstchenCombinedPipeline, WuerstchenDecoderPipeline, WuerstchenPriorPipeline, ) from diffusers.pipelines.wuerstchen import PaellaVQModel, WuerstchenDiffNeXt, WuerstchenPrior model_path = "models/" device = "cpu" paella_vqmodel = VQModel() state_dict = torch.load(os.path.join(model_path, "vqgan_f4_v1_500k.pt"), map_location=device)["state_dict"] paella_vqmodel.load_state_dict(state_dict) state_dict["vquantizer.embedding.weight"] = state_dict["vquantizer.codebook.weight"] state_dict.pop("vquantizer.codebook.weight") vqmodel = PaellaVQModel(num_vq_embeddings=paella_vqmodel.codebook_size, latent_channels=paella_vqmodel.c_latent) vqmodel.load_state_dict(state_dict) # Clip Text encoder and tokenizer text_encoder = CLIPTextModel.from_pretrained("laion/CLIP-ViT-bigG-14-laion2B-39B-b160k") tokenizer = AutoTokenizer.from_pretrained("laion/CLIP-ViT-bigG-14-laion2B-39B-b160k") # Generator gen_text_encoder = CLIPTextModel.from_pretrained("laion/CLIP-ViT-H-14-laion2B-s32B-b79K").to("cpu") gen_tokenizer = AutoTokenizer.from_pretrained("laion/CLIP-ViT-H-14-laion2B-s32B-b79K") orig_state_dict = torch.load(os.path.join(model_path, "model_v2_stage_b.pt"), map_location=device)["state_dict"] state_dict = {} for key in orig_state_dict.keys(): if key.endswith("in_proj_weight"): weights = orig_state_dict[key].chunk(3, 0) state_dict[key.replace("attn.in_proj_weight", "to_q.weight")] = weights[0] state_dict[key.replace("attn.in_proj_weight", "to_k.weight")] = weights[1] state_dict[key.replace("attn.in_proj_weight", "to_v.weight")] = weights[2] elif key.endswith("in_proj_bias"): weights = orig_state_dict[key].chunk(3, 0) state_dict[key.replace("attn.in_proj_bias", "to_q.bias")] = weights[0] state_dict[key.replace("attn.in_proj_bias", "to_k.bias")] = weights[1] state_dict[key.replace("attn.in_proj_bias", "to_v.bias")] = weights[2] elif key.endswith("out_proj.weight"): weights = orig_state_dict[key] state_dict[key.replace("attn.out_proj.weight", "to_out.0.weight")] = weights elif key.endswith("out_proj.bias"): weights = orig_state_dict[key] state_dict[key.replace("attn.out_proj.bias", "to_out.0.bias")] = weights else: state_dict[key] = orig_state_dict[key] deocder = WuerstchenDiffNeXt() deocder.load_state_dict(state_dict) # Prior orig_state_dict = torch.load(os.path.join(model_path, "model_v3_stage_c.pt"), map_location=device)["ema_state_dict"] state_dict = {} for key in orig_state_dict.keys(): if key.endswith("in_proj_weight"): weights = orig_state_dict[key].chunk(3, 0) state_dict[key.replace("attn.in_proj_weight", "to_q.weight")] = weights[0] state_dict[key.replace("attn.in_proj_weight", "to_k.weight")] = weights[1] state_dict[key.replace("attn.in_proj_weight", "to_v.weight")] = weights[2] elif key.endswith("in_proj_bias"): weights = orig_state_dict[key].chunk(3, 0) state_dict[key.replace("attn.in_proj_bias", "to_q.bias")] = weights[0] state_dict[key.replace("attn.in_proj_bias", "to_k.bias")] = weights[1] state_dict[key.replace("attn.in_proj_bias", "to_v.bias")] = weights[2] elif key.endswith("out_proj.weight"): weights = orig_state_dict[key] state_dict[key.replace("attn.out_proj.weight", "to_out.0.weight")] = weights elif key.endswith("out_proj.bias"): weights = orig_state_dict[key] state_dict[key.replace("attn.out_proj.bias", "to_out.0.bias")] = weights else: 
state_dict[key] = orig_state_dict[key] prior_model = WuerstchenPrior(c_in=16, c=1536, c_cond=1280, c_r=64, depth=32, nhead=24).to(device) prior_model.load_state_dict(state_dict) # scheduler scheduler = DDPMWuerstchenScheduler() # Prior pipeline prior_pipeline = WuerstchenPriorPipeline( prior=prior_model, text_encoder=text_encoder, tokenizer=tokenizer, scheduler=scheduler ) prior_pipeline.save_pretrained("warp-ai/wuerstchen-prior") decoder_pipeline = WuerstchenDecoderPipeline( text_encoder=gen_text_encoder, tokenizer=gen_tokenizer, vqgan=vqmodel, decoder=deocder, scheduler=scheduler ) decoder_pipeline.save_pretrained("warp-ai/wuerstchen") # Wuerstchen pipeline wuerstchen_pipeline = WuerstchenCombinedPipeline( # Decoder text_encoder=gen_text_encoder, tokenizer=gen_tokenizer, decoder=deocder, scheduler=scheduler, vqgan=vqmodel, # Prior prior_tokenizer=tokenizer, prior_text_encoder=text_encoder, prior=prior_model, prior_scheduler=scheduler, ) wuerstchen_pipeline.save_pretrained("warp-ai/WuerstchenCombinedPipeline")
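# A minimal follow-up sketch, assuming the save_pretrained() calls above completed and
# the directories exist locally (prompt and file name are illustrative):
#
#   from diffusers import WuerstchenCombinedPipeline
#
#   pipe = WuerstchenCombinedPipeline.from_pretrained("warp-ai/WuerstchenCombinedPipeline")
#   image = pipe(prompt="an astronaut riding a horse", height=1024, width=1024).images[0]
#   image.save("astronaut.png")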
diffusers/scripts/convert_wuerstchen.py/0
{ "file_path": "diffusers/scripts/convert_wuerstchen.py", "repo_id": "diffusers", "token_count": 2171 }
134
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np import torch import tqdm from ...models.unets.unet_1d import UNet1DModel from ...pipelines import DiffusionPipeline from ...utils.dummy_pt_objects import DDPMScheduler from ...utils.torch_utils import randn_tensor class ValueGuidedRLPipeline(DiffusionPipeline): r""" Pipeline for value-guided sampling from a diffusion model trained to predict sequences of states. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.). Parameters: value_function ([`UNet1DModel`]): A specialized UNet for fine-tuning trajectories base on reward. unet ([`UNet1DModel`]): UNet architecture to denoise the encoded trajectories. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded trajectories. Default for this application is [`DDPMScheduler`]. env (): An environment following the OpenAI gym API to act in. For now only Hopper has pretrained models. """ def __init__( self, value_function: UNet1DModel, unet: UNet1DModel, scheduler: DDPMScheduler, env, ): super().__init__() self.register_modules(value_function=value_function, unet=unet, scheduler=scheduler, env=env) self.data = env.get_dataset() self.means = {} for key in self.data.keys(): try: self.means[key] = self.data[key].mean() except: # noqa: E722 pass self.stds = {} for key in self.data.keys(): try: self.stds[key] = self.data[key].std() except: # noqa: E722 pass self.state_dim = env.observation_space.shape[0] self.action_dim = env.action_space.shape[0] def normalize(self, x_in, key): return (x_in - self.means[key]) / self.stds[key] def de_normalize(self, x_in, key): return x_in * self.stds[key] + self.means[key] def to_torch(self, x_in): if isinstance(x_in, dict): return {k: self.to_torch(v) for k, v in x_in.items()} elif torch.is_tensor(x_in): return x_in.to(self.unet.device) return torch.tensor(x_in, device=self.unet.device) def reset_x0(self, x_in, cond, act_dim): for key, val in cond.items(): x_in[:, key, act_dim:] = val.clone() return x_in def run_diffusion(self, x, conditions, n_guide_steps, scale): batch_size = x.shape[0] y = None for i in tqdm.tqdm(self.scheduler.timesteps): # create batch of timesteps to pass into model timesteps = torch.full((batch_size,), i, device=self.unet.device, dtype=torch.long) for _ in range(n_guide_steps): with torch.enable_grad(): x.requires_grad_() # permute to match dimension for pre-trained models y = self.value_function(x.permute(0, 2, 1), timesteps).sample grad = torch.autograd.grad([y.sum()], [x])[0] posterior_variance = self.scheduler._get_variance(i) model_std = torch.exp(0.5 * posterior_variance) grad = model_std * grad grad[timesteps < 2] = 0 x = x.detach() x = x + scale * grad x = self.reset_x0(x, conditions, self.action_dim) prev_x = self.unet(x.permute(0, 2, 1), timesteps).sample.permute(0, 2, 1) # TODO: verify 
deprecation of this kwarg x = self.scheduler.step(prev_x, i, x)["prev_sample"] # apply conditions to the trajectory (set the initial state) x = self.reset_x0(x, conditions, self.action_dim) x = self.to_torch(x) return x, y def __call__(self, obs, batch_size=64, planning_horizon=32, n_guide_steps=2, scale=0.1): # normalize the observations and create batch dimension obs = self.normalize(obs, "observations") obs = obs[None].repeat(batch_size, axis=0) conditions = {0: self.to_torch(obs)} shape = (batch_size, planning_horizon, self.state_dim + self.action_dim) # generate initial noise and apply our conditions (to make the trajectories start at current state) x1 = randn_tensor(shape, device=self.unet.device) x = self.reset_x0(x1, conditions, self.action_dim) x = self.to_torch(x) # run the diffusion process x, y = self.run_diffusion(x, conditions, n_guide_steps, scale) # sort output trajectories by value sorted_idx = y.argsort(0, descending=True).squeeze() sorted_values = x[sorted_idx] actions = sorted_values[:, :, : self.action_dim] actions = actions.detach().cpu().numpy() denorm_actions = self.de_normalize(actions, key="actions") # select the action with the highest value if y is not None: selected_index = 0 else: # if we didn't run value guiding, select a random action selected_index = np.random.randint(0, batch_size) denorm_actions = denorm_actions[selected_index, 0] return denorm_actions
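# A minimal usage sketch, assuming a D4RL-style Hopper environment and a pretrained
# value-guided pipeline on the Hub (the repo id below is illustrative):
#
#   import d4rl  # noqa: F401  (registers the offline-RL gym environments)
#   import gym
#
#   env = gym.make("hopper-medium-v2")
#   pipeline = ValueGuidedRLPipeline.from_pretrained(
#       "bglick13/hopper-medium-v2-value-function-hor32", env=env
#   )
#   obs = env.reset()
#   action = pipeline(obs, planning_horizon=32, n_guide_steps=2, scale=0.1)
#   obs, reward, done, info = env.step(action)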
diffusers/src/diffusers/experimental/rl/value_guided_sampling.py/0
{ "file_path": "diffusers/src/diffusers/experimental/rl/value_guided_sampling.py", "repo_id": "diffusers", "token_count": 2639 }
135
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ..utils import ( DIFFUSERS_SLOW_IMPORT, _LazyModule, is_flax_available, is_torch_available, ) _import_structure = {} if is_torch_available(): _import_structure["adapter"] = ["MultiAdapter", "T2IAdapter"] _import_structure["autoencoders.autoencoder_asym_kl"] = ["AsymmetricAutoencoderKL"] _import_structure["autoencoders.autoencoder_kl"] = ["AutoencoderKL"] _import_structure["autoencoders.autoencoder_kl_cogvideox"] = ["AutoencoderKLCogVideoX"] _import_structure["autoencoders.autoencoder_kl_temporal_decoder"] = ["AutoencoderKLTemporalDecoder"] _import_structure["autoencoders.autoencoder_oobleck"] = ["AutoencoderOobleck"] _import_structure["autoencoders.autoencoder_tiny"] = ["AutoencoderTiny"] _import_structure["autoencoders.consistency_decoder_vae"] = ["ConsistencyDecoderVAE"] _import_structure["autoencoders.vq_model"] = ["VQModel"] _import_structure["controlnet"] = ["ControlNetModel"] _import_structure["controlnet_flux"] = ["FluxControlNetModel", "FluxMultiControlNetModel"] _import_structure["controlnet_hunyuan"] = ["HunyuanDiT2DControlNetModel", "HunyuanDiT2DMultiControlNetModel"] _import_structure["controlnet_sd3"] = ["SD3ControlNetModel", "SD3MultiControlNetModel"] _import_structure["controlnet_sparsectrl"] = ["SparseControlNetModel"] _import_structure["controlnet_xs"] = ["ControlNetXSAdapter", "UNetControlNetXSModel"] _import_structure["embeddings"] = ["ImageProjection"] _import_structure["modeling_utils"] = ["ModelMixin"] _import_structure["transformers.auraflow_transformer_2d"] = ["AuraFlowTransformer2DModel"] _import_structure["transformers.cogvideox_transformer_3d"] = ["CogVideoXTransformer3DModel"] _import_structure["transformers.dit_transformer_2d"] = ["DiTTransformer2DModel"] _import_structure["transformers.dual_transformer_2d"] = ["DualTransformer2DModel"] _import_structure["transformers.hunyuan_transformer_2d"] = ["HunyuanDiT2DModel"] _import_structure["transformers.latte_transformer_3d"] = ["LatteTransformer3DModel"] _import_structure["transformers.lumina_nextdit2d"] = ["LuminaNextDiT2DModel"] _import_structure["transformers.pixart_transformer_2d"] = ["PixArtTransformer2DModel"] _import_structure["transformers.prior_transformer"] = ["PriorTransformer"] _import_structure["transformers.stable_audio_transformer"] = ["StableAudioDiTModel"] _import_structure["transformers.t5_film_transformer"] = ["T5FilmDecoder"] _import_structure["transformers.transformer_2d"] = ["Transformer2DModel"] _import_structure["transformers.transformer_flux"] = ["FluxTransformer2DModel"] _import_structure["transformers.transformer_sd3"] = ["SD3Transformer2DModel"] _import_structure["transformers.transformer_temporal"] = ["TransformerTemporalModel"] _import_structure["unets.unet_1d"] = ["UNet1DModel"] _import_structure["unets.unet_2d"] = ["UNet2DModel"] _import_structure["unets.unet_2d_condition"] = ["UNet2DConditionModel"] _import_structure["unets.unet_3d_condition"] = 
["UNet3DConditionModel"] _import_structure["unets.unet_i2vgen_xl"] = ["I2VGenXLUNet"] _import_structure["unets.unet_kandinsky3"] = ["Kandinsky3UNet"] _import_structure["unets.unet_motion_model"] = ["MotionAdapter", "UNetMotionModel"] _import_structure["unets.unet_spatio_temporal_condition"] = ["UNetSpatioTemporalConditionModel"] _import_structure["unets.unet_stable_cascade"] = ["StableCascadeUNet"] _import_structure["unets.uvit_2d"] = ["UVit2DModel"] if is_flax_available(): _import_structure["controlnet_flax"] = ["FlaxControlNetModel"] _import_structure["unets.unet_2d_condition_flax"] = ["FlaxUNet2DConditionModel"] _import_structure["vae_flax"] = ["FlaxAutoencoderKL"] if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: if is_torch_available(): from .adapter import MultiAdapter, T2IAdapter from .autoencoders import ( AsymmetricAutoencoderKL, AutoencoderKL, AutoencoderKLCogVideoX, AutoencoderKLTemporalDecoder, AutoencoderOobleck, AutoencoderTiny, ConsistencyDecoderVAE, VQModel, ) from .controlnet import ControlNetModel from .controlnet_flux import FluxControlNetModel, FluxMultiControlNetModel from .controlnet_hunyuan import HunyuanDiT2DControlNetModel, HunyuanDiT2DMultiControlNetModel from .controlnet_sd3 import SD3ControlNetModel, SD3MultiControlNetModel from .controlnet_sparsectrl import SparseControlNetModel from .controlnet_xs import ControlNetXSAdapter, UNetControlNetXSModel from .embeddings import ImageProjection from .modeling_utils import ModelMixin from .transformers import ( AuraFlowTransformer2DModel, CogVideoXTransformer3DModel, DiTTransformer2DModel, DualTransformer2DModel, FluxTransformer2DModel, HunyuanDiT2DModel, LatteTransformer3DModel, LuminaNextDiT2DModel, PixArtTransformer2DModel, PriorTransformer, SD3Transformer2DModel, StableAudioDiTModel, T5FilmDecoder, Transformer2DModel, TransformerTemporalModel, ) from .unets import ( I2VGenXLUNet, Kandinsky3UNet, MotionAdapter, StableCascadeUNet, UNet1DModel, UNet2DConditionModel, UNet2DModel, UNet3DConditionModel, UNetMotionModel, UNetSpatioTemporalConditionModel, UVit2DModel, ) if is_flax_available(): from .controlnet_flax import FlaxControlNetModel from .unets import FlaxUNet2DConditionModel from .vae_flax import FlaxAutoencoderKL else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
diffusers/src/diffusers/models/__init__.py/0
{ "file_path": "diffusers/src/diffusers/models/__init__.py", "repo_id": "diffusers", "token_count": 2731 }
136
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from dataclasses import dataclass from typing import Any, Dict, List, Optional, Tuple, Union import torch from torch import nn from torch.nn import functional as F from ..configuration_utils import ConfigMixin, register_to_config from ..loaders.single_file_model import FromOriginalModelMixin from ..utils import BaseOutput, logging from .attention_processor import ( ADDED_KV_ATTENTION_PROCESSORS, CROSS_ATTENTION_PROCESSORS, AttentionProcessor, AttnAddedKVProcessor, AttnProcessor, ) from .embeddings import TextImageProjection, TextImageTimeEmbedding, TextTimeEmbedding, TimestepEmbedding, Timesteps from .modeling_utils import ModelMixin from .unets.unet_2d_blocks import ( CrossAttnDownBlock2D, DownBlock2D, UNetMidBlock2D, UNetMidBlock2DCrossAttn, get_down_block, ) from .unets.unet_2d_condition import UNet2DConditionModel logger = logging.get_logger(__name__) # pylint: disable=invalid-name @dataclass class ControlNetOutput(BaseOutput): """ The output of [`ControlNetModel`]. Args: down_block_res_samples (`tuple[torch.Tensor]`): A tuple of downsample activations at different resolutions for each downsampling block. Each tensor should be of shape `(batch_size, channel * resolution, height //resolution, width // resolution)`. Output can be used to condition the original UNet's downsampling activations. mid_down_block_re_sample (`torch.Tensor`): The activation of the middle block (the lowest sample resolution). Each tensor should be of shape `(batch_size, channel * lowest_resolution, height // lowest_resolution, width // lowest_resolution)`. Output can be used to condition the original UNet's middle block activation. """ down_block_res_samples: Tuple[torch.Tensor] mid_block_res_sample: torch.Tensor class ControlNetConditioningEmbedding(nn.Module): """ Quoting from https://arxiv.org/abs/2302.05543: "Stable Diffusion uses a pre-processing method similar to VQ-GAN [11] to convert the entire dataset of 512 × 512 images into smaller 64 × 64 “latent images” for stabilized training. This requires ControlNets to convert image-based conditions to 64 × 64 feature space to match the convolution size. We use a tiny network E(·) of four convolution layers with 4 × 4 kernels and 2 × 2 strides (activated by ReLU, channels are 16, 32, 64, 128, initialized with Gaussian weights, trained jointly with the full model) to encode image-space conditions ... into feature maps ..." """ def __init__( self, conditioning_embedding_channels: int, conditioning_channels: int = 3, block_out_channels: Tuple[int, ...] 
= (16, 32, 96, 256), ): super().__init__() self.conv_in = nn.Conv2d(conditioning_channels, block_out_channels[0], kernel_size=3, padding=1) self.blocks = nn.ModuleList([]) for i in range(len(block_out_channels) - 1): channel_in = block_out_channels[i] channel_out = block_out_channels[i + 1] self.blocks.append(nn.Conv2d(channel_in, channel_in, kernel_size=3, padding=1)) self.blocks.append(nn.Conv2d(channel_in, channel_out, kernel_size=3, padding=1, stride=2)) self.conv_out = zero_module( nn.Conv2d(block_out_channels[-1], conditioning_embedding_channels, kernel_size=3, padding=1) ) def forward(self, conditioning): embedding = self.conv_in(conditioning) embedding = F.silu(embedding) for block in self.blocks: embedding = block(embedding) embedding = F.silu(embedding) embedding = self.conv_out(embedding) return embedding class ControlNetModel(ModelMixin, ConfigMixin, FromOriginalModelMixin): """ A ControlNet model. Args: in_channels (`int`, defaults to 4): The number of channels in the input sample. flip_sin_to_cos (`bool`, defaults to `True`): Whether to flip the sin to cos in the time embedding. freq_shift (`int`, defaults to 0): The frequency shift to apply to the time embedding. down_block_types (`tuple[str]`, defaults to `("CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D")`): The tuple of downsample blocks to use. only_cross_attention (`Union[bool, Tuple[bool]]`, defaults to `False`): block_out_channels (`tuple[int]`, defaults to `(320, 640, 1280, 1280)`): The tuple of output channels for each block. layers_per_block (`int`, defaults to 2): The number of layers per block. downsample_padding (`int`, defaults to 1): The padding to use for the downsampling convolution. mid_block_scale_factor (`float`, defaults to 1): The scale factor to use for the mid block. act_fn (`str`, defaults to "silu"): The activation function to use. norm_num_groups (`int`, *optional*, defaults to 32): The number of groups to use for the normalization. If None, normalization and activation layers is skipped in post-processing. norm_eps (`float`, defaults to 1e-5): The epsilon to use for the normalization. cross_attention_dim (`int`, defaults to 1280): The dimension of the cross attention features. transformer_layers_per_block (`int` or `Tuple[int]`, *optional*, defaults to 1): The number of transformer blocks of type [`~models.attention.BasicTransformerBlock`]. Only relevant for [`~models.unet_2d_blocks.CrossAttnDownBlock2D`], [`~models.unet_2d_blocks.CrossAttnUpBlock2D`], [`~models.unet_2d_blocks.UNetMidBlock2DCrossAttn`]. encoder_hid_dim (`int`, *optional*, defaults to None): If `encoder_hid_dim_type` is defined, `encoder_hidden_states` will be projected from `encoder_hid_dim` dimension to `cross_attention_dim`. encoder_hid_dim_type (`str`, *optional*, defaults to `None`): If given, the `encoder_hidden_states` and potentially other embeddings are down-projected to text embeddings of dimension `cross_attention` according to `encoder_hid_dim_type`. attention_head_dim (`Union[int, Tuple[int]]`, defaults to 8): The dimension of the attention heads. use_linear_projection (`bool`, defaults to `False`): class_embed_type (`str`, *optional*, defaults to `None`): The type of class embedding to use which is ultimately summed with the time embeddings. Choose from None, `"timestep"`, `"identity"`, `"projection"`, or `"simple_projection"`. addition_embed_type (`str`, *optional*, defaults to `None`): Configures an optional embedding which will be summed with the time embeddings. 
Choose from `None` or "text". "text" will use the `TextTimeEmbedding` layer. num_class_embeds (`int`, *optional*, defaults to 0): Input dimension of the learnable embedding matrix to be projected to `time_embed_dim`, when performing class conditioning with `class_embed_type` equal to `None`. upcast_attention (`bool`, defaults to `False`): resnet_time_scale_shift (`str`, defaults to `"default"`): Time scale shift config for ResNet blocks (see `ResnetBlock2D`). Choose from `default` or `scale_shift`. projection_class_embeddings_input_dim (`int`, *optional*, defaults to `None`): The dimension of the `class_labels` input when `class_embed_type="projection"`. Required when `class_embed_type="projection"`. controlnet_conditioning_channel_order (`str`, defaults to `"rgb"`): The channel order of conditional image. Will convert to `rgb` if it's `bgr`. conditioning_embedding_out_channels (`tuple[int]`, *optional*, defaults to `(16, 32, 96, 256)`): The tuple of output channel for each block in the `conditioning_embedding` layer. global_pool_conditions (`bool`, defaults to `False`): TODO(Patrick) - unused parameter. addition_embed_type_num_heads (`int`, defaults to 64): The number of heads to use for the `TextTimeEmbedding` layer. """ _supports_gradient_checkpointing = True @register_to_config def __init__( self, in_channels: int = 4, conditioning_channels: int = 3, flip_sin_to_cos: bool = True, freq_shift: int = 0, down_block_types: Tuple[str, ...] = ( "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D", ), mid_block_type: Optional[str] = "UNetMidBlock2DCrossAttn", only_cross_attention: Union[bool, Tuple[bool]] = False, block_out_channels: Tuple[int, ...] = (320, 640, 1280, 1280), layers_per_block: int = 2, downsample_padding: int = 1, mid_block_scale_factor: float = 1, act_fn: str = "silu", norm_num_groups: Optional[int] = 32, norm_eps: float = 1e-5, cross_attention_dim: int = 1280, transformer_layers_per_block: Union[int, Tuple[int, ...]] = 1, encoder_hid_dim: Optional[int] = None, encoder_hid_dim_type: Optional[str] = None, attention_head_dim: Union[int, Tuple[int, ...]] = 8, num_attention_heads: Optional[Union[int, Tuple[int, ...]]] = None, use_linear_projection: bool = False, class_embed_type: Optional[str] = None, addition_embed_type: Optional[str] = None, addition_time_embed_dim: Optional[int] = None, num_class_embeds: Optional[int] = None, upcast_attention: bool = False, resnet_time_scale_shift: str = "default", projection_class_embeddings_input_dim: Optional[int] = None, controlnet_conditioning_channel_order: str = "rgb", conditioning_embedding_out_channels: Optional[Tuple[int, ...]] = (16, 32, 96, 256), global_pool_conditions: bool = False, addition_embed_type_num_heads: int = 64, ): super().__init__() # If `num_attention_heads` is not defined (which is the case for most models) # it will default to `attention_head_dim`. This looks weird upon first reading it and it is. # The reason for this behavior is to correct for incorrectly named variables that were introduced # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131 # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking # which is why we correct for the naming here. 
num_attention_heads = num_attention_heads or attention_head_dim # Check inputs if len(block_out_channels) != len(down_block_types): raise ValueError( f"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}." ) if not isinstance(only_cross_attention, bool) and len(only_cross_attention) != len(down_block_types): raise ValueError( f"Must provide the same number of `only_cross_attention` as `down_block_types`. `only_cross_attention`: {only_cross_attention}. `down_block_types`: {down_block_types}." ) if not isinstance(num_attention_heads, int) and len(num_attention_heads) != len(down_block_types): raise ValueError( f"Must provide the same number of `num_attention_heads` as `down_block_types`. `num_attention_heads`: {num_attention_heads}. `down_block_types`: {down_block_types}." ) if isinstance(transformer_layers_per_block, int): transformer_layers_per_block = [transformer_layers_per_block] * len(down_block_types) # input conv_in_kernel = 3 conv_in_padding = (conv_in_kernel - 1) // 2 self.conv_in = nn.Conv2d( in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding ) # time time_embed_dim = block_out_channels[0] * 4 self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift) timestep_input_dim = block_out_channels[0] self.time_embedding = TimestepEmbedding( timestep_input_dim, time_embed_dim, act_fn=act_fn, ) if encoder_hid_dim_type is None and encoder_hid_dim is not None: encoder_hid_dim_type = "text_proj" self.register_to_config(encoder_hid_dim_type=encoder_hid_dim_type) logger.info("encoder_hid_dim_type defaults to 'text_proj' as `encoder_hid_dim` is defined.") if encoder_hid_dim is None and encoder_hid_dim_type is not None: raise ValueError( f"`encoder_hid_dim` has to be defined when `encoder_hid_dim_type` is set to {encoder_hid_dim_type}." ) if encoder_hid_dim_type == "text_proj": self.encoder_hid_proj = nn.Linear(encoder_hid_dim, cross_attention_dim) elif encoder_hid_dim_type == "text_image_proj": # image_embed_dim DOESN'T have to be `cross_attention_dim`. To not clutter the __init__ too much # they are set to `cross_attention_dim` here as this is exactly the required dimension for the currently only use # case when `addition_embed_type == "text_image_proj"` (Kandinsky 2.1)` self.encoder_hid_proj = TextImageProjection( text_embed_dim=encoder_hid_dim, image_embed_dim=cross_attention_dim, cross_attention_dim=cross_attention_dim, ) elif encoder_hid_dim_type is not None: raise ValueError( f"encoder_hid_dim_type: {encoder_hid_dim_type} must be None, 'text_proj' or 'text_image_proj'." ) else: self.encoder_hid_proj = None # class embedding if class_embed_type is None and num_class_embeds is not None: self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim) elif class_embed_type == "timestep": self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim) elif class_embed_type == "identity": self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim) elif class_embed_type == "projection": if projection_class_embeddings_input_dim is None: raise ValueError( "`class_embed_type`: 'projection' requires `projection_class_embeddings_input_dim` be set" ) # The projection `class_embed_type` is the same as the timestep `class_embed_type` except # 1. the `class_labels` inputs are not first converted to sinusoidal embeddings # 2. it projects from an arbitrary input dimension. 
# # Note that `TimestepEmbedding` is quite general, being mainly linear layers and activations. # When used for embedding actual timesteps, the timesteps are first converted to sinusoidal embeddings. # As a result, `TimestepEmbedding` can be passed arbitrary vectors. self.class_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim) else: self.class_embedding = None if addition_embed_type == "text": if encoder_hid_dim is not None: text_time_embedding_from_dim = encoder_hid_dim else: text_time_embedding_from_dim = cross_attention_dim self.add_embedding = TextTimeEmbedding( text_time_embedding_from_dim, time_embed_dim, num_heads=addition_embed_type_num_heads ) elif addition_embed_type == "text_image": # text_embed_dim and image_embed_dim DON'T have to be `cross_attention_dim`. To not clutter the __init__ too much # they are set to `cross_attention_dim` here as this is exactly the required dimension for the currently only use # case when `addition_embed_type == "text_image"` (Kandinsky 2.1)` self.add_embedding = TextImageTimeEmbedding( text_embed_dim=cross_attention_dim, image_embed_dim=cross_attention_dim, time_embed_dim=time_embed_dim ) elif addition_embed_type == "text_time": self.add_time_proj = Timesteps(addition_time_embed_dim, flip_sin_to_cos, freq_shift) self.add_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim) elif addition_embed_type is not None: raise ValueError(f"addition_embed_type: {addition_embed_type} must be None, 'text' or 'text_image'.") # control net conditioning embedding self.controlnet_cond_embedding = ControlNetConditioningEmbedding( conditioning_embedding_channels=block_out_channels[0], block_out_channels=conditioning_embedding_out_channels, conditioning_channels=conditioning_channels, ) self.down_blocks = nn.ModuleList([]) self.controlnet_down_blocks = nn.ModuleList([]) if isinstance(only_cross_attention, bool): only_cross_attention = [only_cross_attention] * len(down_block_types) if isinstance(attention_head_dim, int): attention_head_dim = (attention_head_dim,) * len(down_block_types) if isinstance(num_attention_heads, int): num_attention_heads = (num_attention_heads,) * len(down_block_types) # down output_channel = block_out_channels[0] controlnet_block = nn.Conv2d(output_channel, output_channel, kernel_size=1) controlnet_block = zero_module(controlnet_block) self.controlnet_down_blocks.append(controlnet_block) for i, down_block_type in enumerate(down_block_types): input_channel = output_channel output_channel = block_out_channels[i] is_final_block = i == len(block_out_channels) - 1 down_block = get_down_block( down_block_type, num_layers=layers_per_block, transformer_layers_per_block=transformer_layers_per_block[i], in_channels=input_channel, out_channels=output_channel, temb_channels=time_embed_dim, add_downsample=not is_final_block, resnet_eps=norm_eps, resnet_act_fn=act_fn, resnet_groups=norm_num_groups, cross_attention_dim=cross_attention_dim, num_attention_heads=num_attention_heads[i], attention_head_dim=attention_head_dim[i] if attention_head_dim[i] is not None else output_channel, downsample_padding=downsample_padding, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention[i], upcast_attention=upcast_attention, resnet_time_scale_shift=resnet_time_scale_shift, ) self.down_blocks.append(down_block) for _ in range(layers_per_block): controlnet_block = nn.Conv2d(output_channel, output_channel, kernel_size=1) controlnet_block = zero_module(controlnet_block) 
self.controlnet_down_blocks.append(controlnet_block) if not is_final_block: controlnet_block = nn.Conv2d(output_channel, output_channel, kernel_size=1) controlnet_block = zero_module(controlnet_block) self.controlnet_down_blocks.append(controlnet_block) # mid mid_block_channel = block_out_channels[-1] controlnet_block = nn.Conv2d(mid_block_channel, mid_block_channel, kernel_size=1) controlnet_block = zero_module(controlnet_block) self.controlnet_mid_block = controlnet_block if mid_block_type == "UNetMidBlock2DCrossAttn": self.mid_block = UNetMidBlock2DCrossAttn( transformer_layers_per_block=transformer_layers_per_block[-1], in_channels=mid_block_channel, temb_channels=time_embed_dim, resnet_eps=norm_eps, resnet_act_fn=act_fn, output_scale_factor=mid_block_scale_factor, resnet_time_scale_shift=resnet_time_scale_shift, cross_attention_dim=cross_attention_dim, num_attention_heads=num_attention_heads[-1], resnet_groups=norm_num_groups, use_linear_projection=use_linear_projection, upcast_attention=upcast_attention, ) elif mid_block_type == "UNetMidBlock2D": self.mid_block = UNetMidBlock2D( in_channels=block_out_channels[-1], temb_channels=time_embed_dim, num_layers=0, resnet_eps=norm_eps, resnet_act_fn=act_fn, output_scale_factor=mid_block_scale_factor, resnet_groups=norm_num_groups, resnet_time_scale_shift=resnet_time_scale_shift, add_attention=False, ) else: raise ValueError(f"unknown mid_block_type : {mid_block_type}") @classmethod def from_unet( cls, unet: UNet2DConditionModel, controlnet_conditioning_channel_order: str = "rgb", conditioning_embedding_out_channels: Optional[Tuple[int, ...]] = (16, 32, 96, 256), load_weights_from_unet: bool = True, conditioning_channels: int = 3, ): r""" Instantiate a [`ControlNetModel`] from [`UNet2DConditionModel`]. Parameters: unet (`UNet2DConditionModel`): The UNet model weights to copy to the [`ControlNetModel`]. All configuration options are also copied where applicable. 
""" transformer_layers_per_block = ( unet.config.transformer_layers_per_block if "transformer_layers_per_block" in unet.config else 1 ) encoder_hid_dim = unet.config.encoder_hid_dim if "encoder_hid_dim" in unet.config else None encoder_hid_dim_type = unet.config.encoder_hid_dim_type if "encoder_hid_dim_type" in unet.config else None addition_embed_type = unet.config.addition_embed_type if "addition_embed_type" in unet.config else None addition_time_embed_dim = ( unet.config.addition_time_embed_dim if "addition_time_embed_dim" in unet.config else None ) controlnet = cls( encoder_hid_dim=encoder_hid_dim, encoder_hid_dim_type=encoder_hid_dim_type, addition_embed_type=addition_embed_type, addition_time_embed_dim=addition_time_embed_dim, transformer_layers_per_block=transformer_layers_per_block, in_channels=unet.config.in_channels, flip_sin_to_cos=unet.config.flip_sin_to_cos, freq_shift=unet.config.freq_shift, down_block_types=unet.config.down_block_types, only_cross_attention=unet.config.only_cross_attention, block_out_channels=unet.config.block_out_channels, layers_per_block=unet.config.layers_per_block, downsample_padding=unet.config.downsample_padding, mid_block_scale_factor=unet.config.mid_block_scale_factor, act_fn=unet.config.act_fn, norm_num_groups=unet.config.norm_num_groups, norm_eps=unet.config.norm_eps, cross_attention_dim=unet.config.cross_attention_dim, attention_head_dim=unet.config.attention_head_dim, num_attention_heads=unet.config.num_attention_heads, use_linear_projection=unet.config.use_linear_projection, class_embed_type=unet.config.class_embed_type, num_class_embeds=unet.config.num_class_embeds, upcast_attention=unet.config.upcast_attention, resnet_time_scale_shift=unet.config.resnet_time_scale_shift, projection_class_embeddings_input_dim=unet.config.projection_class_embeddings_input_dim, mid_block_type=unet.config.mid_block_type, controlnet_conditioning_channel_order=controlnet_conditioning_channel_order, conditioning_embedding_out_channels=conditioning_embedding_out_channels, conditioning_channels=conditioning_channels, ) if load_weights_from_unet: controlnet.conv_in.load_state_dict(unet.conv_in.state_dict()) controlnet.time_proj.load_state_dict(unet.time_proj.state_dict()) controlnet.time_embedding.load_state_dict(unet.time_embedding.state_dict()) if controlnet.class_embedding: controlnet.class_embedding.load_state_dict(unet.class_embedding.state_dict()) if hasattr(controlnet, "add_embedding"): controlnet.add_embedding.load_state_dict(unet.add_embedding.state_dict()) controlnet.down_blocks.load_state_dict(unet.down_blocks.state_dict()) controlnet.mid_block.load_state_dict(unet.mid_block.state_dict()) return controlnet @property # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.attn_processors def attn_processors(self) -> Dict[str, AttentionProcessor]: r""" Returns: `dict` of attention processors: A dictionary containing all attention processors used in the model with indexed by its weight name. 
""" # set recursively processors = {} def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]): if hasattr(module, "get_processor"): processors[f"{name}.processor"] = module.get_processor() for sub_name, child in module.named_children(): fn_recursive_add_processors(f"{name}.{sub_name}", child, processors) return processors for name, module in self.named_children(): fn_recursive_add_processors(name, module, processors) return processors # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attn_processor def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]): r""" Sets the attention processor to use to compute attention. Parameters: processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`): The instantiated processor class or a dictionary of processor classes that will be set as the processor for **all** `Attention` layers. If `processor` is a dict, the key needs to define the path to the corresponding cross attention processor. This is strongly recommended when setting trainable attention processors. """ count = len(self.attn_processors.keys()) if isinstance(processor, dict) and len(processor) != count: raise ValueError( f"A dict of processors was passed, but the number of processors {len(processor)} does not match the" f" number of attention layers: {count}. Please make sure to pass {count} processor classes." ) def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor): if hasattr(module, "set_processor"): if not isinstance(processor, dict): module.set_processor(processor) else: module.set_processor(processor.pop(f"{name}.processor")) for sub_name, child in module.named_children(): fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor) for name, module in self.named_children(): fn_recursive_attn_processor(name, module, processor) # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_default_attn_processor def set_default_attn_processor(self): """ Disables custom attention processors and sets the default attention implementation. """ if all(proc.__class__ in ADDED_KV_ATTENTION_PROCESSORS for proc in self.attn_processors.values()): processor = AttnAddedKVProcessor() elif all(proc.__class__ in CROSS_ATTENTION_PROCESSORS for proc in self.attn_processors.values()): processor = AttnProcessor() else: raise ValueError( f"Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}" ) self.set_attn_processor(processor) # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attention_slice def set_attention_slice(self, slice_size: Union[str, int, List[int]]) -> None: r""" Enable sliced attention computation. When this option is enabled, the attention module splits the input tensor in slices to compute attention in several steps. This is useful for saving some memory in exchange for a small decrease in speed. Args: slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `"auto"`): When `"auto"`, input to the attention heads is halved, so attention is computed in two steps. If `"max"`, maximum amount of memory is saved by running only one slice at a time. If a number is provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim` must be a multiple of `slice_size`. 
""" sliceable_head_dims = [] def fn_recursive_retrieve_sliceable_dims(module: torch.nn.Module): if hasattr(module, "set_attention_slice"): sliceable_head_dims.append(module.sliceable_head_dim) for child in module.children(): fn_recursive_retrieve_sliceable_dims(child) # retrieve number of attention layers for module in self.children(): fn_recursive_retrieve_sliceable_dims(module) num_sliceable_layers = len(sliceable_head_dims) if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory slice_size = [dim // 2 for dim in sliceable_head_dims] elif slice_size == "max": # make smallest slice possible slice_size = num_sliceable_layers * [1] slice_size = num_sliceable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size if len(slice_size) != len(sliceable_head_dims): raise ValueError( f"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different" f" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}." ) for i in range(len(slice_size)): size = slice_size[i] dim = sliceable_head_dims[i] if size is not None and size > dim: raise ValueError(f"size {size} has to be smaller or equal to {dim}.") # Recursively walk through all the children. # Any children which exposes the set_attention_slice method # gets the message def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]): if hasattr(module, "set_attention_slice"): module.set_attention_slice(slice_size.pop()) for child in module.children(): fn_recursive_set_attention_slice(child, slice_size) reversed_slice_size = list(reversed(slice_size)) for module in self.children(): fn_recursive_set_attention_slice(module, reversed_slice_size) def _set_gradient_checkpointing(self, module, value: bool = False) -> None: if isinstance(module, (CrossAttnDownBlock2D, DownBlock2D)): module.gradient_checkpointing = value def forward( self, sample: torch.Tensor, timestep: Union[torch.Tensor, float, int], encoder_hidden_states: torch.Tensor, controlnet_cond: torch.Tensor, conditioning_scale: float = 1.0, class_labels: Optional[torch.Tensor] = None, timestep_cond: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, added_cond_kwargs: Optional[Dict[str, torch.Tensor]] = None, cross_attention_kwargs: Optional[Dict[str, Any]] = None, guess_mode: bool = False, return_dict: bool = True, ) -> Union[ControlNetOutput, Tuple[Tuple[torch.Tensor, ...], torch.Tensor]]: """ The [`ControlNetModel`] forward method. Args: sample (`torch.Tensor`): The noisy input tensor. timestep (`Union[torch.Tensor, float, int]`): The number of timesteps to denoise an input. encoder_hidden_states (`torch.Tensor`): The encoder hidden states. controlnet_cond (`torch.Tensor`): The conditional input tensor of shape `(batch_size, sequence_length, hidden_size)`. conditioning_scale (`float`, defaults to `1.0`): The scale factor for ControlNet outputs. class_labels (`torch.Tensor`, *optional*, defaults to `None`): Optional class labels for conditioning. Their embeddings will be summed with the timestep embeddings. timestep_cond (`torch.Tensor`, *optional*, defaults to `None`): Additional conditional embeddings for timestep. If provided, the embeddings will be summed with the timestep_embedding passed through the `self.time_embedding` layer to obtain the final timestep embeddings. 
attention_mask (`torch.Tensor`, *optional*, defaults to `None`): An attention mask of shape `(batch, key_tokens)` is applied to `encoder_hidden_states`. If `1` the mask is kept, otherwise if `0` it is discarded. Mask will be converted into a bias, which adds large negative values to the attention scores corresponding to "discard" tokens. added_cond_kwargs (`dict`): Additional conditions for the Stable Diffusion XL UNet. cross_attention_kwargs (`dict[str]`, *optional*, defaults to `None`): A kwargs dictionary that if specified is passed along to the `AttnProcessor`. guess_mode (`bool`, defaults to `False`): In this mode, the ControlNet encoder tries its best to recognize the input content of the input even if you remove all prompts. A `guidance_scale` between 3.0 and 5.0 is recommended. return_dict (`bool`, defaults to `True`): Whether or not to return a [`~models.controlnet.ControlNetOutput`] instead of a plain tuple. Returns: [`~models.controlnet.ControlNetOutput`] **or** `tuple`: If `return_dict` is `True`, a [`~models.controlnet.ControlNetOutput`] is returned, otherwise a tuple is returned where the first element is the sample tensor. """ # check channel order channel_order = self.config.controlnet_conditioning_channel_order if channel_order == "rgb": # in rgb order by default ... elif channel_order == "bgr": controlnet_cond = torch.flip(controlnet_cond, dims=[1]) else: raise ValueError(f"unknown `controlnet_conditioning_channel_order`: {channel_order}") # prepare attention_mask if attention_mask is not None: attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0 attention_mask = attention_mask.unsqueeze(1) # 1. time timesteps = timestep if not torch.is_tensor(timesteps): # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can # This would be a good case for the `match` statement (Python 3.10+) is_mps = sample.device.type == "mps" if isinstance(timestep, float): dtype = torch.float32 if is_mps else torch.float64 else: dtype = torch.int32 if is_mps else torch.int64 timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device) elif len(timesteps.shape) == 0: timesteps = timesteps[None].to(sample.device) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML timesteps = timesteps.expand(sample.shape[0]) t_emb = self.time_proj(timesteps) # timesteps does not contain any weights and will always return f32 tensors # but time_embedding might actually be running in fp16. so we need to cast here. # there might be better ways to encapsulate this. 
t_emb = t_emb.to(dtype=sample.dtype) emb = self.time_embedding(t_emb, timestep_cond) aug_emb = None if self.class_embedding is not None: if class_labels is None: raise ValueError("class_labels should be provided when num_class_embeds > 0") if self.config.class_embed_type == "timestep": class_labels = self.time_proj(class_labels) class_emb = self.class_embedding(class_labels).to(dtype=self.dtype) emb = emb + class_emb if self.config.addition_embed_type is not None: if self.config.addition_embed_type == "text": aug_emb = self.add_embedding(encoder_hidden_states) elif self.config.addition_embed_type == "text_time": if "text_embeds" not in added_cond_kwargs: raise ValueError( f"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `text_embeds` to be passed in `added_cond_kwargs`" ) text_embeds = added_cond_kwargs.get("text_embeds") if "time_ids" not in added_cond_kwargs: raise ValueError( f"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `time_ids` to be passed in `added_cond_kwargs`" ) time_ids = added_cond_kwargs.get("time_ids") time_embeds = self.add_time_proj(time_ids.flatten()) time_embeds = time_embeds.reshape((text_embeds.shape[0], -1)) add_embeds = torch.concat([text_embeds, time_embeds], dim=-1) add_embeds = add_embeds.to(emb.dtype) aug_emb = self.add_embedding(add_embeds) emb = emb + aug_emb if aug_emb is not None else emb # 2. pre-process sample = self.conv_in(sample) controlnet_cond = self.controlnet_cond_embedding(controlnet_cond) sample = sample + controlnet_cond # 3. down down_block_res_samples = (sample,) for downsample_block in self.down_blocks: if hasattr(downsample_block, "has_cross_attention") and downsample_block.has_cross_attention: sample, res_samples = downsample_block( hidden_states=sample, temb=emb, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask, cross_attention_kwargs=cross_attention_kwargs, ) else: sample, res_samples = downsample_block(hidden_states=sample, temb=emb) down_block_res_samples += res_samples # 4. mid if self.mid_block is not None: if hasattr(self.mid_block, "has_cross_attention") and self.mid_block.has_cross_attention: sample = self.mid_block( sample, emb, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask, cross_attention_kwargs=cross_attention_kwargs, ) else: sample = self.mid_block(sample, emb) # 5. Control net blocks controlnet_down_block_res_samples = () for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks): down_block_res_sample = controlnet_block(down_block_res_sample) controlnet_down_block_res_samples = controlnet_down_block_res_samples + (down_block_res_sample,) down_block_res_samples = controlnet_down_block_res_samples mid_block_res_sample = self.controlnet_mid_block(sample) # 6. 
scaling if guess_mode and not self.config.global_pool_conditions: scales = torch.logspace(-1, 0, len(down_block_res_samples) + 1, device=sample.device) # 0.1 to 1.0 scales = scales * conditioning_scale down_block_res_samples = [sample * scale for sample, scale in zip(down_block_res_samples, scales)] mid_block_res_sample = mid_block_res_sample * scales[-1] # last one else: down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples] mid_block_res_sample = mid_block_res_sample * conditioning_scale if self.config.global_pool_conditions: down_block_res_samples = [ torch.mean(sample, dim=(2, 3), keepdim=True) for sample in down_block_res_samples ] mid_block_res_sample = torch.mean(mid_block_res_sample, dim=(2, 3), keepdim=True) if not return_dict: return (down_block_res_samples, mid_block_res_sample) return ControlNetOutput( down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample ) def zero_module(module): for p in module.parameters(): nn.init.zeros_(p) return module
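# ---------------------------------------------------------------------------
# Usage sketch (editor addition, not part of the upstream file): a minimal,
# hedged example of building a ControlNet from an existing UNet with
# `from_unet` and calling the `forward` pass defined above. The checkpoint id,
# tensor shapes, and the 512 -> 64 conditioning-image/latent ratio are
# illustrative assumptions; copy the body into a standalone script rather than
# running this module directly (it uses relative imports).
if __name__ == "__main__":
    import torch
    from diffusers import ControlNetModel, UNet2DConditionModel

    unet = UNet2DConditionModel.from_pretrained(
        "runwayml/stable-diffusion-v1-5", subfolder="unet"
    )
    controlnet = ControlNetModel.from_unet(unet, load_weights_from_unet=True)

    sample = torch.randn(1, unet.config.in_channels, 64, 64)  # noisy latents
    controlnet_cond = torch.randn(1, 3, 512, 512)  # conditioning image in [-1, 1]
    encoder_hidden_states = torch.randn(1, 77, unet.config.cross_attention_dim)

    with torch.no_grad():
        out = controlnet(
            sample,
            timestep=10,
            encoder_hidden_states=encoder_hidden_states,
            controlnet_cond=controlnet_cond,
            conditioning_scale=1.0,
            guess_mode=False,
        )

    # `out.down_block_res_samples` and `out.mid_block_res_sample` are the residuals
    # that a UNet2DConditionModel can consume as additional down/mid block residuals.
    print(len(out.down_block_res_samples), out.mid_block_res_sample.shape)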
diffusers/src/diffusers/models/controlnet.py/0
{ "file_path": "diffusers/src/diffusers/models/controlnet.py", "repo_id": "diffusers", "token_count": 18842 }
137
# coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect import itertools import json import os import re from collections import OrderedDict from functools import partial from pathlib import Path from typing import Any, Callable, List, Optional, Tuple, Union import safetensors import torch from huggingface_hub import create_repo, split_torch_state_dict_into_shards from huggingface_hub.utils import validate_hf_hub_args from torch import Tensor, nn from .. import __version__ from ..utils import ( CONFIG_NAME, FLAX_WEIGHTS_NAME, SAFE_WEIGHTS_INDEX_NAME, SAFETENSORS_WEIGHTS_NAME, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, _add_variant, _get_checkpoint_shard_files, _get_model_file, deprecate, is_accelerate_available, is_torch_version, logging, ) from ..utils.hub_utils import ( PushToHubMixin, load_or_create_model_card, populate_model_card, ) from .model_loading_utils import ( _determine_device_map, _fetch_index_file, _load_state_dict_into_model, load_model_dict_into_meta, load_state_dict, ) logger = logging.get_logger(__name__) _REGEX_SHARD = re.compile(r"(.*?)-\d{5}-of-\d{5}") if is_torch_version(">=", "1.9.0"): _LOW_CPU_MEM_USAGE_DEFAULT = True else: _LOW_CPU_MEM_USAGE_DEFAULT = False if is_accelerate_available(): import accelerate def get_parameter_device(parameter: torch.nn.Module) -> torch.device: try: parameters_and_buffers = itertools.chain(parameter.parameters(), parameter.buffers()) return next(parameters_and_buffers).device except StopIteration: # For torch.nn.DataParallel compatibility in PyTorch 1.5 def find_tensor_attributes(module: torch.nn.Module) -> List[Tuple[str, Tensor]]: tuples = [(k, v) for k, v in module.__dict__.items() if torch.is_tensor(v)] return tuples gen = parameter._named_members(get_members_fn=find_tensor_attributes) first_tuple = next(gen) return first_tuple[1].device def get_parameter_dtype(parameter: torch.nn.Module) -> torch.dtype: try: params = tuple(parameter.parameters()) if len(params) > 0: return params[0].dtype buffers = tuple(parameter.buffers()) if len(buffers) > 0: return buffers[0].dtype except StopIteration: # For torch.nn.DataParallel compatibility in PyTorch 1.5 def find_tensor_attributes(module: torch.nn.Module) -> List[Tuple[str, Tensor]]: tuples = [(k, v) for k, v in module.__dict__.items() if torch.is_tensor(v)] return tuples gen = parameter._named_members(get_members_fn=find_tensor_attributes) first_tuple = next(gen) return first_tuple[1].dtype class ModelMixin(torch.nn.Module, PushToHubMixin): r""" Base class for all models. [`ModelMixin`] takes care of storing the model configuration and provides methods for loading, downloading and saving models. - **config_name** ([`str`]) -- Filename to save a model to when calling [`~models.ModelMixin.save_pretrained`]. 
""" config_name = CONFIG_NAME _automatically_saved_args = ["_diffusers_version", "_class_name", "_name_or_path"] _supports_gradient_checkpointing = False _keys_to_ignore_on_load_unexpected = None _no_split_modules = None def __init__(self): super().__init__() def __getattr__(self, name: str) -> Any: """The only reason we overwrite `getattr` here is to gracefully deprecate accessing config attributes directly. See https://github.com/huggingface/diffusers/pull/3129 We need to overwrite __getattr__ here in addition so that we don't trigger `torch.nn.Module`'s __getattr__': https://pytorch.org/docs/stable/_modules/torch/nn/modules/module.html#Module """ is_in_config = "_internal_dict" in self.__dict__ and hasattr(self.__dict__["_internal_dict"], name) is_attribute = name in self.__dict__ if is_in_config and not is_attribute: deprecation_message = f"Accessing config attribute `{name}` directly via '{type(self).__name__}' object attribute is deprecated. Please access '{name}' over '{type(self).__name__}'s config object instead, e.g. 'unet.config.{name}'." deprecate("direct config name access", "1.0.0", deprecation_message, standard_warn=False, stacklevel=3) return self._internal_dict[name] # call PyTorch's https://pytorch.org/docs/stable/_modules/torch/nn/modules/module.html#Module return super().__getattr__(name) @property def is_gradient_checkpointing(self) -> bool: """ Whether gradient checkpointing is activated for this model or not. """ return any(hasattr(m, "gradient_checkpointing") and m.gradient_checkpointing for m in self.modules()) def enable_gradient_checkpointing(self) -> None: """ Activates gradient checkpointing for the current model (may be referred to as *activation checkpointing* or *checkpoint activations* in other frameworks). """ if not self._supports_gradient_checkpointing: raise ValueError(f"{self.__class__.__name__} does not support gradient checkpointing.") self.apply(partial(self._set_gradient_checkpointing, value=True)) def disable_gradient_checkpointing(self) -> None: """ Deactivates gradient checkpointing for the current model (may be referred to as *activation checkpointing* or *checkpoint activations* in other frameworks). """ if self._supports_gradient_checkpointing: self.apply(partial(self._set_gradient_checkpointing, value=False)) def set_use_npu_flash_attention(self, valid: bool) -> None: r""" Set the switch for the npu flash attention. """ def fn_recursive_set_npu_flash_attention(module: torch.nn.Module): if hasattr(module, "set_use_npu_flash_attention"): module.set_use_npu_flash_attention(valid) for child in module.children(): fn_recursive_set_npu_flash_attention(child) for module in self.children(): if isinstance(module, torch.nn.Module): fn_recursive_set_npu_flash_attention(module) def enable_npu_flash_attention(self) -> None: r""" Enable npu flash attention from torch_npu """ self.set_use_npu_flash_attention(True) def disable_npu_flash_attention(self) -> None: r""" disable npu flash attention from torch_npu """ self.set_use_npu_flash_attention(False) def set_use_memory_efficient_attention_xformers( self, valid: bool, attention_op: Optional[Callable] = None ) -> None: # Recursively walk through all the children. 
# Any children which exposes the set_use_memory_efficient_attention_xformers method # gets the message def fn_recursive_set_mem_eff(module: torch.nn.Module): if hasattr(module, "set_use_memory_efficient_attention_xformers"): module.set_use_memory_efficient_attention_xformers(valid, attention_op) for child in module.children(): fn_recursive_set_mem_eff(child) for module in self.children(): if isinstance(module, torch.nn.Module): fn_recursive_set_mem_eff(module) def enable_xformers_memory_efficient_attention(self, attention_op: Optional[Callable] = None) -> None: r""" Enable memory efficient attention from [xFormers](https://facebookresearch.github.io/xformers/). When this option is enabled, you should observe lower GPU memory usage and a potential speed up during inference. Speed up during training is not guaranteed. <Tip warning={true}> ⚠️ When memory efficient attention and sliced attention are both enabled, memory efficient attention takes precedent. </Tip> Parameters: attention_op (`Callable`, *optional*): Override the default `None` operator for use as `op` argument to the [`memory_efficient_attention()`](https://facebookresearch.github.io/xformers/components/ops.html#xformers.ops.memory_efficient_attention) function of xFormers. Examples: ```py >>> import torch >>> from diffusers import UNet2DConditionModel >>> from xformers.ops import MemoryEfficientAttentionFlashAttentionOp >>> model = UNet2DConditionModel.from_pretrained( ... "stabilityai/stable-diffusion-2-1", subfolder="unet", torch_dtype=torch.float16 ... ) >>> model = model.to("cuda") >>> model.enable_xformers_memory_efficient_attention(attention_op=MemoryEfficientAttentionFlashAttentionOp) ``` """ self.set_use_memory_efficient_attention_xformers(True, attention_op) def disable_xformers_memory_efficient_attention(self) -> None: r""" Disable memory efficient attention from [xFormers](https://facebookresearch.github.io/xformers/). """ self.set_use_memory_efficient_attention_xformers(False) def save_pretrained( self, save_directory: Union[str, os.PathLike], is_main_process: bool = True, save_function: Optional[Callable] = None, safe_serialization: bool = True, variant: Optional[str] = None, max_shard_size: Union[int, str] = "10GB", push_to_hub: bool = False, **kwargs, ): """ Save a model and its configuration file to a directory so that it can be reloaded using the [`~models.ModelMixin.from_pretrained`] class method. Arguments: save_directory (`str` or `os.PathLike`): Directory to save a model and its configuration file to. Will be created if it doesn't exist. is_main_process (`bool`, *optional*, defaults to `True`): Whether the process calling this is the main process or not. Useful during distributed training and you need to call this function on all processes. In this case, set `is_main_process=True` only on the main process to avoid race conditions. save_function (`Callable`): The function to use to save the state dictionary. Useful during distributed training when you need to replace `torch.save` with another method. Can be configured with the environment variable `DIFFUSERS_SAVE_MODE`. safe_serialization (`bool`, *optional*, defaults to `True`): Whether to save the model using `safetensors` or the traditional PyTorch way with `pickle`. variant (`str`, *optional*): If specified, weights are saved in the format `pytorch_model.<variant>.bin`. max_shard_size (`int` or `str`, defaults to `"10GB"`): The maximum size for a checkpoint before being sharded. Checkpoints shard will then be each of size lower than this size. 
If expressed as a string, needs to be digits followed by a unit (like `"5GB"`). If expressed as an integer, the unit is bytes. Note that this limit will be decreased after a certain period of time (starting from Oct 2024) to allow users to upgrade to the latest version of `diffusers`. This is to establish a common default size for this argument across different libraries in the Hugging Face ecosystem (`transformers`, and `accelerate`, for example). push_to_hub (`bool`, *optional*, defaults to `False`): Whether or not to push your model to the Hugging Face Hub after saving it. You can specify the repository you want to push to with `repo_id` (will default to the name of `save_directory` in your namespace). kwargs (`Dict[str, Any]`, *optional*): Additional keyword arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method. """ if os.path.isfile(save_directory): logger.error(f"Provided path ({save_directory}) should be a directory, not a file") return weights_name = SAFETENSORS_WEIGHTS_NAME if safe_serialization else WEIGHTS_NAME weights_name = _add_variant(weights_name, variant) weight_name_split = weights_name.split(".") if len(weight_name_split) in [2, 3]: weights_name_pattern = weight_name_split[0] + "{suffix}." + ".".join(weight_name_split[1:]) else: raise ValueError(f"Invalid {weights_name} provided.") os.makedirs(save_directory, exist_ok=True) if push_to_hub: commit_message = kwargs.pop("commit_message", None) private = kwargs.pop("private", False) create_pr = kwargs.pop("create_pr", False) token = kwargs.pop("token", None) repo_id = kwargs.pop("repo_id", save_directory.split(os.path.sep)[-1]) repo_id = create_repo(repo_id, exist_ok=True, private=private, token=token).repo_id # Only save the model itself if we are using distributed training model_to_save = self # Attach architecture to the config # Save the config if is_main_process: model_to_save.save_config(save_directory) # Save the model state_dict = model_to_save.state_dict() # Save the model state_dict_split = split_torch_state_dict_into_shards( state_dict, max_shard_size=max_shard_size, filename_pattern=weights_name_pattern ) # Clean the folder from a previous save if is_main_process: for filename in os.listdir(save_directory): if filename in state_dict_split.filename_to_tensors.keys(): continue full_filename = os.path.join(save_directory, filename) if not os.path.isfile(full_filename): continue weights_without_ext = weights_name_pattern.replace(".bin", "").replace(".safetensors", "") weights_without_ext = weights_without_ext.replace("{suffix}", "") filename_without_ext = filename.replace(".bin", "").replace(".safetensors", "") # make sure that file to be deleted matches format of sharded file, e.g. pytorch_model-00001-of-00005 if ( filename.startswith(weights_without_ext) and _REGEX_SHARD.fullmatch(filename_without_ext) is not None ): os.remove(full_filename) for filename, tensors in state_dict_split.filename_to_tensors.items(): shard = {tensor: state_dict[tensor] for tensor in tensors} filepath = os.path.join(save_directory, filename) if safe_serialization: # At some point we will need to deal better with save_function (used for TPU and other distributed # joyfulness), but for now this enough. 
safetensors.torch.save_file(shard, filepath, metadata={"format": "pt"}) else: torch.save(shard, filepath) if state_dict_split.is_sharded: index = { "metadata": state_dict_split.metadata, "weight_map": state_dict_split.tensor_to_filename, } save_index_file = SAFE_WEIGHTS_INDEX_NAME if safe_serialization else WEIGHTS_INDEX_NAME save_index_file = os.path.join(save_directory, _add_variant(save_index_file, variant)) # Save the index as well with open(save_index_file, "w", encoding="utf-8") as f: content = json.dumps(index, indent=2, sort_keys=True) + "\n" f.write(content) logger.info( f"The model is bigger than the maximum size per checkpoint ({max_shard_size}) and is going to be " f"split in {len(state_dict_split.filename_to_tensors)} checkpoint shards. You can find where each parameters has been saved in the " f"index located at {save_index_file}." ) else: path_to_weights = os.path.join(save_directory, weights_name) logger.info(f"Model weights saved in {path_to_weights}") if push_to_hub: # Create a new empty model card and eventually tag it model_card = load_or_create_model_card(repo_id, token=token) model_card = populate_model_card(model_card) model_card.save(Path(save_directory, "README.md").as_posix()) self._upload_folder( save_directory, repo_id, token=token, commit_message=commit_message, create_pr=create_pr, ) @classmethod @validate_hf_hub_args def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], **kwargs): r""" Instantiate a pretrained PyTorch model from a pretrained model configuration. The model is set in evaluation mode - `model.eval()` - by default, and dropout modules are deactivated. To train the model, set it back in training mode with `model.train()`. Parameters: pretrained_model_name_or_path (`str` or `os.PathLike`, *optional*): Can be either: - A string, the *model id* (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on the Hub. - A path to a *directory* (for example `./my_model_directory`) containing the model weights saved with [`~ModelMixin.save_pretrained`]. cache_dir (`Union[str, os.PathLike]`, *optional*): Path to a directory where a downloaded pretrained model configuration is cached if the standard cache is not used. torch_dtype (`str` or `torch.dtype`, *optional*): Override the default `torch.dtype` and load the model with another dtype. If `"auto"` is passed, the dtype is automatically derived from the model's weights. force_download (`bool`, *optional*, defaults to `False`): Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist. proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. output_loading_info (`bool`, *optional*, defaults to `False`): Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages. local_files_only(`bool`, *optional*, defaults to `False`): Whether to only load local model weights and configuration files or not. If set to `True`, the model won't be downloaded from the Hub. token (`str` or *bool*, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from `diffusers-cli login` (stored in `~/.huggingface`) is used. revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. 
It can be a branch name, a tag name, a commit id, or any identifier allowed by Git. from_flax (`bool`, *optional*, defaults to `False`): Load the model weights from a Flax checkpoint save file. subfolder (`str`, *optional*, defaults to `""`): The subfolder location of a model file within a larger model repository on the Hub or locally. mirror (`str`, *optional*): Mirror source to resolve accessibility issues if you're downloading a model in China. We do not guarantee the timeliness or safety of the source, and you should refer to the mirror site for more information. device_map (`str` or `Dict[str, Union[int, str, torch.device]]`, *optional*): A map that specifies where each submodule should go. It doesn't need to be defined for each parameter/buffer name; once a given module name is inside, every submodule of it will be sent to the same device. Defaults to `None`, meaning that the model will be loaded on CPU. Set `device_map="auto"` to have 🤗 Accelerate automatically compute the most optimized `device_map`. For more information about each option see [designing a device map](https://hf.co/docs/accelerate/main/en/usage_guides/big_modeling#designing-a-device-map). max_memory (`Dict`, *optional*): A dictionary device identifier for the maximum memory. Will default to the maximum memory available for each GPU and the available CPU RAM if unset. offload_folder (`str` or `os.PathLike`, *optional*): The path to offload weights if `device_map` contains the value `"disk"`. offload_state_dict (`bool`, *optional*): If `True`, temporarily offloads the CPU state dict to the hard drive to avoid running out of CPU RAM if the weight of the CPU state dict + the biggest shard of the checkpoint does not fit. Defaults to `True` when there is some disk offload. low_cpu_mem_usage (`bool`, *optional*, defaults to `True` if torch version >= 1.9.0 else `False`): Speed up model loading only loading the pretrained weights and not initializing the weights. This also tries to not use more than 1x model size in CPU memory (including peak memory) while loading the model. Only supported for PyTorch >= 1.9.0. If you are using an older version of PyTorch, setting this argument to `True` will raise an error. variant (`str`, *optional*): Load weights from a specified `variant` filename such as `"fp16"` or `"ema"`. This is ignored when loading `from_flax`. use_safetensors (`bool`, *optional*, defaults to `None`): If set to `None`, the `safetensors` weights are downloaded if they're available **and** if the `safetensors` library is installed. If set to `True`, the model is forcibly loaded from `safetensors` weights. If set to `False`, `safetensors` weights are not loaded. <Tip> To use private or [gated models](https://huggingface.co/docs/hub/models-gated#gated-models), log-in with `huggingface-cli login`. You can also activate the special ["offline-mode"](https://huggingface.co/diffusers/installation.html#offline-mode) to use this method in a firewalled environment. 
</Tip> Example: ```py from diffusers import UNet2DConditionModel unet = UNet2DConditionModel.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder="unet") ``` If you get the error message below, you need to finetune the weights for your downstream task: ```bash Some weights of UNet2DConditionModel were not initialized from the model checkpoint at runwayml/stable-diffusion-v1-5 and are newly initialized because the shapes did not match: - conv_in.weight: found shape torch.Size([320, 4, 3, 3]) in the checkpoint and torch.Size([320, 9, 3, 3]) in the model instantiated You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. ``` """ cache_dir = kwargs.pop("cache_dir", None) ignore_mismatched_sizes = kwargs.pop("ignore_mismatched_sizes", False) force_download = kwargs.pop("force_download", False) from_flax = kwargs.pop("from_flax", False) proxies = kwargs.pop("proxies", None) output_loading_info = kwargs.pop("output_loading_info", False) local_files_only = kwargs.pop("local_files_only", None) token = kwargs.pop("token", None) revision = kwargs.pop("revision", None) torch_dtype = kwargs.pop("torch_dtype", None) subfolder = kwargs.pop("subfolder", None) device_map = kwargs.pop("device_map", None) max_memory = kwargs.pop("max_memory", None) offload_folder = kwargs.pop("offload_folder", None) offload_state_dict = kwargs.pop("offload_state_dict", False) low_cpu_mem_usage = kwargs.pop("low_cpu_mem_usage", _LOW_CPU_MEM_USAGE_DEFAULT) variant = kwargs.pop("variant", None) use_safetensors = kwargs.pop("use_safetensors", None) allow_pickle = False if use_safetensors is None: use_safetensors = True allow_pickle = True if low_cpu_mem_usage and not is_accelerate_available(): low_cpu_mem_usage = False logger.warning( "Cannot initialize model with low cpu memory usage because `accelerate` was not found in the" " environment. Defaulting to `low_cpu_mem_usage=False`. It is strongly recommended to install" " `accelerate` for faster and less memory-intense model loading. You can do so with: \n```\npip" " install accelerate\n```\n." ) if device_map is not None and not is_accelerate_available(): raise NotImplementedError( "Loading and dispatching requires `accelerate`. Please make sure to install accelerate or set" " `device_map=None`. You can install accelerate with `pip install accelerate`." ) # Check if we can handle device_map and dispatching the weights if device_map is not None and not is_torch_version(">=", "1.9.0"): raise NotImplementedError( "Loading and dispatching requires torch >= 1.9.0. Please either update your PyTorch version or set" " `device_map=None`." ) if low_cpu_mem_usage is True and not is_torch_version(">=", "1.9.0"): raise NotImplementedError( "Low memory initialization requires torch >= 1.9.0. Please either update your PyTorch version or set" " `low_cpu_mem_usage=False`." ) if low_cpu_mem_usage is False and device_map is not None: raise ValueError( f"You cannot set `low_cpu_mem_usage` to `False` while using device_map={device_map} for loading and" " dispatching. Please make sure to set `low_cpu_mem_usage=True`." ) # change device_map into a map if we passed an int, a str or a torch.device if isinstance(device_map, torch.device): device_map = {"": device_map} elif isinstance(device_map, str) and device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]: try: device_map = {"": torch.device(device_map)} except RuntimeError: raise ValueError( "When passing device_map as a string, the value needs to be a device name (e.g. 
cpu, cuda:0) or " f"'auto', 'balanced', 'balanced_low_0', 'sequential' but found {device_map}." ) elif isinstance(device_map, int): if device_map < 0: raise ValueError( "You can't pass device_map as a negative int. If you want to put the model on the cpu, pass device_map = 'cpu' " ) else: device_map = {"": device_map} if device_map is not None: if low_cpu_mem_usage is None: low_cpu_mem_usage = True elif not low_cpu_mem_usage: raise ValueError("Passing along a `device_map` requires `low_cpu_mem_usage=True`") if low_cpu_mem_usage: if device_map is not None and not is_torch_version(">=", "1.10"): # The max memory utils require PyTorch >= 1.10 to have torch.cuda.mem_get_info. raise ValueError("`low_cpu_mem_usage` and `device_map` require PyTorch >= 1.10.") # Load config if we don't provide a configuration config_path = pretrained_model_name_or_path user_agent = { "diffusers": __version__, "file_type": "model", "framework": "pytorch", } # load config config, unused_kwargs, commit_hash = cls.load_config( config_path, cache_dir=cache_dir, return_unused_kwargs=True, return_commit_hash=True, force_download=force_download, proxies=proxies, local_files_only=local_files_only, token=token, revision=revision, subfolder=subfolder, user_agent=user_agent, **kwargs, ) # Determine if we're loading from a directory of sharded checkpoints. is_sharded = False index_file = None is_local = os.path.isdir(pretrained_model_name_or_path) index_file = _fetch_index_file( is_local=is_local, pretrained_model_name_or_path=pretrained_model_name_or_path, subfolder=subfolder or "", use_safetensors=use_safetensors, cache_dir=cache_dir, variant=variant, force_download=force_download, proxies=proxies, local_files_only=local_files_only, token=token, revision=revision, user_agent=user_agent, commit_hash=commit_hash, ) if index_file is not None and index_file.is_file(): is_sharded = True if is_sharded and from_flax: raise ValueError("Loading of sharded checkpoints is not supported when `from_flax=True`.") # load model model_file = None if from_flax: model_file = _get_model_file( pretrained_model_name_or_path, weights_name=FLAX_WEIGHTS_NAME, cache_dir=cache_dir, force_download=force_download, proxies=proxies, local_files_only=local_files_only, token=token, revision=revision, subfolder=subfolder, user_agent=user_agent, commit_hash=commit_hash, ) model = cls.from_config(config, **unused_kwargs) # Convert the weights from .modeling_pytorch_flax_utils import load_flax_checkpoint_in_pytorch_model model = load_flax_checkpoint_in_pytorch_model(model, model_file) else: if is_sharded: sharded_ckpt_cached_folder, sharded_metadata = _get_checkpoint_shard_files( pretrained_model_name_or_path, index_file, cache_dir=cache_dir, proxies=proxies, local_files_only=local_files_only, token=token, user_agent=user_agent, revision=revision, subfolder=subfolder or "", ) elif use_safetensors and not is_sharded: try: model_file = _get_model_file( pretrained_model_name_or_path, weights_name=_add_variant(SAFETENSORS_WEIGHTS_NAME, variant), cache_dir=cache_dir, force_download=force_download, proxies=proxies, local_files_only=local_files_only, token=token, revision=revision, subfolder=subfolder, user_agent=user_agent, commit_hash=commit_hash, ) except IOError as e: logger.error(f"An error occurred while trying to fetch {pretrained_model_name_or_path}: {e}") if not allow_pickle: raise logger.warning( "Defaulting to unsafe serialization. Pass `allow_pickle=False` to raise an error instead." 
) if model_file is None and not is_sharded: model_file = _get_model_file( pretrained_model_name_or_path, weights_name=_add_variant(WEIGHTS_NAME, variant), cache_dir=cache_dir, force_download=force_download, proxies=proxies, local_files_only=local_files_only, token=token, revision=revision, subfolder=subfolder, user_agent=user_agent, commit_hash=commit_hash, ) if low_cpu_mem_usage: # Instantiate model with empty weights with accelerate.init_empty_weights(): model = cls.from_config(config, **unused_kwargs) # if device_map is None, load the state dict and move the params from meta device to the cpu if device_map is None and not is_sharded: param_device = "cpu" state_dict = load_state_dict(model_file, variant=variant) model._convert_deprecated_attention_blocks(state_dict) # move the params from meta device to cpu missing_keys = set(model.state_dict().keys()) - set(state_dict.keys()) if len(missing_keys) > 0: raise ValueError( f"Cannot load {cls} from {pretrained_model_name_or_path} because the following keys are" f" missing: \n {', '.join(missing_keys)}. \n Please make sure to pass" " `low_cpu_mem_usage=False` and `device_map=None` if you want to randomly initialize" " those weights or else make sure your checkpoint file is correct." ) unexpected_keys = load_model_dict_into_meta( model, state_dict, device=param_device, dtype=torch_dtype, model_name_or_path=pretrained_model_name_or_path, ) if cls._keys_to_ignore_on_load_unexpected is not None: for pat in cls._keys_to_ignore_on_load_unexpected: unexpected_keys = [k for k in unexpected_keys if re.search(pat, k) is None] if len(unexpected_keys) > 0: logger.warning( f"Some weights of the model checkpoint were not used when initializing {cls.__name__}: \n {[', '.join(unexpected_keys)]}" ) else: # else let accelerate handle loading and dispatching. # Load weights and dispatch according to the device_map # by default the device_map is None and the weights are loaded on the CPU force_hook = True device_map = _determine_device_map(model, device_map, max_memory, torch_dtype) if device_map is None and is_sharded: # we load the parameters on the cpu device_map = {"": "cpu"} force_hook = False try: accelerate.load_checkpoint_and_dispatch( model, model_file if not is_sharded else index_file, device_map, max_memory=max_memory, offload_folder=offload_folder, offload_state_dict=offload_state_dict, dtype=torch_dtype, force_hooks=force_hook, strict=True, ) except AttributeError as e: # When using accelerate loading, we do not have the ability to load the state # dict and rename the weight names manually. Additionally, accelerate skips # torch loading conventions and directly writes into `module.{_buffers, _parameters}` # (which look like they should be private variables?), so we can't use the standard hooks # to rename parameters on load. We need to mimic the original weight names so the correct # attributes are available. After we have loaded the weights, we convert the deprecated # names to the new non-deprecated names. Then we _greatly encourage_ the user to convert # the weights so we don't have to do this again. if "'Attention' object has no attribute" in str(e): logger.warning( f"Taking `{str(e)}` while using `accelerate.load_checkpoint_and_dispatch` to mean {pretrained_model_name_or_path}" " was saved with deprecated attention block weight names. We will load it with the deprecated attention block" " names and convert them on the fly to the new attention block format. 
Please re-save the model after this conversion," " so we don't have to do the on the fly renaming in the future. If the model is from a hub checkpoint," " please also re-upload it or open a PR on the original repository." ) model._temp_convert_self_to_deprecated_attention_blocks() accelerate.load_checkpoint_and_dispatch( model, model_file if not is_sharded else index_file, device_map, max_memory=max_memory, offload_folder=offload_folder, offload_state_dict=offload_state_dict, dtype=torch_dtype, force_hooks=force_hook, strict=True, ) model._undo_temp_convert_self_to_deprecated_attention_blocks() else: raise e loading_info = { "missing_keys": [], "unexpected_keys": [], "mismatched_keys": [], "error_msgs": [], } else: model = cls.from_config(config, **unused_kwargs) state_dict = load_state_dict(model_file, variant=variant) model._convert_deprecated_attention_blocks(state_dict) model, missing_keys, unexpected_keys, mismatched_keys, error_msgs = cls._load_pretrained_model( model, state_dict, model_file, pretrained_model_name_or_path, ignore_mismatched_sizes=ignore_mismatched_sizes, ) loading_info = { "missing_keys": missing_keys, "unexpected_keys": unexpected_keys, "mismatched_keys": mismatched_keys, "error_msgs": error_msgs, } if torch_dtype is not None and not isinstance(torch_dtype, torch.dtype): raise ValueError( f"{torch_dtype} needs to be of type `torch.dtype`, e.g. `torch.float16`, but is {type(torch_dtype)}." ) elif torch_dtype is not None: model = model.to(torch_dtype) model.register_to_config(_name_or_path=pretrained_model_name_or_path) # Set model in evaluation mode to deactivate DropOut modules by default model.eval() if output_loading_info: return model, loading_info return model @classmethod def _load_pretrained_model( cls, model, state_dict: OrderedDict, resolved_archive_file, pretrained_model_name_or_path: Union[str, os.PathLike], ignore_mismatched_sizes: bool = False, ): # Retrieve missing & unexpected_keys model_state_dict = model.state_dict() loaded_keys = list(state_dict.keys()) expected_keys = list(model_state_dict.keys()) original_loaded_keys = loaded_keys missing_keys = list(set(expected_keys) - set(loaded_keys)) unexpected_keys = list(set(loaded_keys) - set(expected_keys)) # Make sure we are able to load base models as well as derived models (with heads) model_to_load = model def _find_mismatched_keys( state_dict, model_state_dict, loaded_keys, ignore_mismatched_sizes, ): mismatched_keys = [] if ignore_mismatched_sizes: for checkpoint_key in loaded_keys: model_key = checkpoint_key if ( model_key in model_state_dict and state_dict[checkpoint_key].shape != model_state_dict[model_key].shape ): mismatched_keys.append( (checkpoint_key, state_dict[checkpoint_key].shape, model_state_dict[model_key].shape) ) del state_dict[checkpoint_key] return mismatched_keys if state_dict is not None: # Whole checkpoint mismatched_keys = _find_mismatched_keys( state_dict, model_state_dict, original_loaded_keys, ignore_mismatched_sizes, ) error_msgs = _load_state_dict_into_model(model_to_load, state_dict) if len(error_msgs) > 0: error_msg = "\n\t".join(error_msgs) if "size mismatch" in error_msg: error_msg += ( "\n\tYou may consider adding `ignore_mismatched_sizes=True` in the model `from_pretrained` method." 
) raise RuntimeError(f"Error(s) in loading state_dict for {model.__class__.__name__}:\n\t{error_msg}") if len(unexpected_keys) > 0: logger.warning( f"Some weights of the model checkpoint at {pretrained_model_name_or_path} were not used when" f" initializing {model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are" f" initializing {model.__class__.__name__} from the checkpoint of a model trained on another task" " or with another architecture (e.g. initializing a BertForSequenceClassification model from a" " BertForPreTraining model).\n- This IS NOT expected if you are initializing" f" {model.__class__.__name__} from the checkpoint of a model that you expect to be exactly" " identical (initializing a BertForSequenceClassification model from a" " BertForSequenceClassification model)." ) else: logger.info(f"All model checkpoint weights were used when initializing {model.__class__.__name__}.\n") if len(missing_keys) > 0: logger.warning( f"Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at" f" {pretrained_model_name_or_path} and are newly initialized: {missing_keys}\nYou should probably" " TRAIN this model on a down-stream task to be able to use it for predictions and inference." ) elif len(mismatched_keys) == 0: logger.info( f"All the weights of {model.__class__.__name__} were initialized from the model checkpoint at" f" {pretrained_model_name_or_path}.\nIf your task is similar to the task the model of the" f" checkpoint was trained on, you can already use {model.__class__.__name__} for predictions" " without further training." ) if len(mismatched_keys) > 0: mismatched_warning = "\n".join( [ f"- {key}: found shape {shape1} in the checkpoint and {shape2} in the model instantiated" for key, shape1, shape2 in mismatched_keys ] ) logger.warning( f"Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at" f" {pretrained_model_name_or_path} and are newly initialized because the shapes did not" f" match:\n{mismatched_warning}\nYou should probably TRAIN this model on a down-stream task to be" " able to use it for predictions and inference." ) return model, missing_keys, unexpected_keys, mismatched_keys, error_msgs @classmethod def _get_signature_keys(cls, obj): parameters = inspect.signature(obj.__init__).parameters required_parameters = {k: v for k, v in parameters.items() if v.default == inspect._empty} optional_parameters = set({k for k, v in parameters.items() if v.default != inspect._empty}) expected_modules = set(required_parameters.keys()) - {"self"} return expected_modules, optional_parameters # Adapted from `transformers` modeling_utils.py def _get_no_split_modules(self, device_map: str): """ Get the modules of the model that should not be spit when using device_map. We iterate through the modules to get the underlying `_no_split_modules`. Args: device_map (`str`): The device map value. Options are ["auto", "balanced", "balanced_low_0", "sequential"] Returns: `List[str]`: List of modules that should not be split """ _no_split_modules = set() modules_to_check = [self] while len(modules_to_check) > 0: module = modules_to_check.pop(-1) # if the module does not appear in _no_split_modules, we also check the children if module.__class__.__name__ not in _no_split_modules: if isinstance(module, ModelMixin): if module._no_split_modules is None: raise ValueError( f"{module.__class__.__name__} does not support `device_map='{device_map}'`. 
To implement support, the model " "class needs to implement the `_no_split_modules` attribute." ) else: _no_split_modules = _no_split_modules | set(module._no_split_modules) modules_to_check += list(module.children()) return list(_no_split_modules) @property def device(self) -> torch.device: """ `torch.device`: The device on which the module is (assuming that all the module parameters are on the same device). """ return get_parameter_device(self) @property def dtype(self) -> torch.dtype: """ `torch.dtype`: The dtype of the module (assuming that all the module parameters have the same dtype). """ return get_parameter_dtype(self) def num_parameters(self, only_trainable: bool = False, exclude_embeddings: bool = False) -> int: """ Get number of (trainable or non-embedding) parameters in the module. Args: only_trainable (`bool`, *optional*, defaults to `False`): Whether or not to return only the number of trainable parameters. exclude_embeddings (`bool`, *optional*, defaults to `False`): Whether or not to return only the number of non-embedding parameters. Returns: `int`: The number of parameters. Example: ```py from diffusers import UNet2DConditionModel model_id = "runwayml/stable-diffusion-v1-5" unet = UNet2DConditionModel.from_pretrained(model_id, subfolder="unet") unet.num_parameters(only_trainable=True) 859520964 ``` """ if exclude_embeddings: embedding_param_names = [ f"{name}.weight" for name, module_type in self.named_modules() if isinstance(module_type, torch.nn.Embedding) ] non_embedding_parameters = [ parameter for name, parameter in self.named_parameters() if name not in embedding_param_names ] return sum(p.numel() for p in non_embedding_parameters if p.requires_grad or not only_trainable) else: return sum(p.numel() for p in self.parameters() if p.requires_grad or not only_trainable) def _convert_deprecated_attention_blocks(self, state_dict: OrderedDict) -> None: deprecated_attention_block_paths = [] def recursive_find_attn_block(name, module): if hasattr(module, "_from_deprecated_attn_block") and module._from_deprecated_attn_block: deprecated_attention_block_paths.append(name) for sub_name, sub_module in module.named_children(): sub_name = sub_name if name == "" else f"{name}.{sub_name}" recursive_find_attn_block(sub_name, sub_module) recursive_find_attn_block("", self) # NOTE: we have to check if the deprecated parameters are in the state dict # because it is possible we are loading from a state dict that was already # converted for path in deprecated_attention_block_paths: # group_norm path stays the same # query -> to_q if f"{path}.query.weight" in state_dict: state_dict[f"{path}.to_q.weight"] = state_dict.pop(f"{path}.query.weight") if f"{path}.query.bias" in state_dict: state_dict[f"{path}.to_q.bias"] = state_dict.pop(f"{path}.query.bias") # key -> to_k if f"{path}.key.weight" in state_dict: state_dict[f"{path}.to_k.weight"] = state_dict.pop(f"{path}.key.weight") if f"{path}.key.bias" in state_dict: state_dict[f"{path}.to_k.bias"] = state_dict.pop(f"{path}.key.bias") # value -> to_v if f"{path}.value.weight" in state_dict: state_dict[f"{path}.to_v.weight"] = state_dict.pop(f"{path}.value.weight") if f"{path}.value.bias" in state_dict: state_dict[f"{path}.to_v.bias"] = state_dict.pop(f"{path}.value.bias") # proj_attn -> to_out.0 if f"{path}.proj_attn.weight" in state_dict: state_dict[f"{path}.to_out.0.weight"] = state_dict.pop(f"{path}.proj_attn.weight") if f"{path}.proj_attn.bias" in state_dict: state_dict[f"{path}.to_out.0.bias"] = state_dict.pop(f"{path}.proj_attn.bias") def 
_temp_convert_self_to_deprecated_attention_blocks(self) -> None: deprecated_attention_block_modules = [] def recursive_find_attn_block(module): if hasattr(module, "_from_deprecated_attn_block") and module._from_deprecated_attn_block: deprecated_attention_block_modules.append(module) for sub_module in module.children(): recursive_find_attn_block(sub_module) recursive_find_attn_block(self) for module in deprecated_attention_block_modules: module.query = module.to_q module.key = module.to_k module.value = module.to_v module.proj_attn = module.to_out[0] # We don't _have_ to delete the old attributes, but it's helpful to ensure # that _all_ the weights are loaded into the new attributes and we're not # making an incorrect assumption that this model should be converted when # it really shouldn't be. del module.to_q del module.to_k del module.to_v del module.to_out def _undo_temp_convert_self_to_deprecated_attention_blocks(self) -> None: deprecated_attention_block_modules = [] def recursive_find_attn_block(module) -> None: if hasattr(module, "_from_deprecated_attn_block") and module._from_deprecated_attn_block: deprecated_attention_block_modules.append(module) for sub_module in module.children(): recursive_find_attn_block(sub_module) recursive_find_attn_block(self) for module in deprecated_attention_block_modules: module.to_q = module.query module.to_k = module.key module.to_v = module.value module.to_out = nn.ModuleList([module.proj_attn, nn.Dropout(module.dropout)]) del module.query del module.key del module.value del module.proj_attn class LegacyModelMixin(ModelMixin): r""" A subclass of `ModelMixin` to resolve class mapping from legacy classes (like `Transformer2DModel`) to more pipeline-specific classes (like `DiTTransformer2DModel`). """ @classmethod @validate_hf_hub_args def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], **kwargs): # To prevent dependency import problem. from .model_loading_utils import _fetch_remapped_cls_from_config # Create a copy of the kwargs so that we don't mess with the keyword arguments in the downstream calls. kwargs_copy = kwargs.copy() cache_dir = kwargs.pop("cache_dir", None) force_download = kwargs.pop("force_download", False) proxies = kwargs.pop("proxies", None) local_files_only = kwargs.pop("local_files_only", None) token = kwargs.pop("token", None) revision = kwargs.pop("revision", None) subfolder = kwargs.pop("subfolder", None) # Load config if we don't provide a configuration config_path = pretrained_model_name_or_path user_agent = { "diffusers": __version__, "file_type": "model", "framework": "pytorch", } # load config config, _, _ = cls.load_config( config_path, cache_dir=cache_dir, return_unused_kwargs=True, return_commit_hash=True, force_download=force_download, proxies=proxies, local_files_only=local_files_only, token=token, revision=revision, subfolder=subfolder, user_agent=user_agent, **kwargs, ) # resolve remapping remapped_class = _fetch_remapped_cls_from_config(config, cls) return remapped_class.from_pretrained(pretrained_model_name_or_path, **kwargs_copy)
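# ---------------------------------------------------------------------------
# Usage sketch (editor addition, not part of the upstream module): a minimal,
# hedged round-trip through the `save_pretrained` / `from_pretrained` API
# documented above. The hub id, local directory, and `variant="fp16"` are
# illustrative assumptions; copy the body into a standalone script rather than
# running this module directly (it uses relative imports).
if __name__ == "__main__":
    import torch
    from diffusers import UNet2DConditionModel

    unet = UNet2DConditionModel.from_pretrained(
        "runwayml/stable-diffusion-v1-5", subfolder="unet", torch_dtype=torch.float16
    )

    # Weights are written to a `*.fp16.safetensors` file because safetensors
    # serialization is the default and a `variant` is supplied; a checkpoint larger
    # than `max_shard_size` would instead be split into shards plus an index file
    # that `from_pretrained` detects and reloads transparently.
    unet.save_pretrained("./sd15-unet", variant="fp16")

    reloaded = UNet2DConditionModel.from_pretrained(
        "./sd15-unet", variant="fp16", torch_dtype=torch.float16
    )
    print(reloaded.dtype, reloaded.num_parameters())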
diffusers/src/diffusers/models/modeling_utils.py/0
{ "file_path": "diffusers/src/diffusers/models/modeling_utils.py", "repo_id": "diffusers", "token_count": 25801 }
138
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Any, Dict, Optional import torch import torch.nn.functional as F from torch import nn from ...configuration_utils import LegacyConfigMixin, register_to_config from ...utils import deprecate, is_torch_version, logging from ..attention import BasicTransformerBlock from ..embeddings import ImagePositionalEmbeddings, PatchEmbed, PixArtAlphaTextProjection from ..modeling_outputs import Transformer2DModelOutput from ..modeling_utils import LegacyModelMixin from ..normalization import AdaLayerNormSingle logger = logging.get_logger(__name__) # pylint: disable=invalid-name class Transformer2DModelOutput(Transformer2DModelOutput): def __init__(self, *args, **kwargs): deprecation_message = "Importing `Transformer2DModelOutput` from `diffusers.models.transformer_2d` is deprecated and this will be removed in a future version. Please use `from diffusers.models.modeling_outputs import Transformer2DModelOutput`, instead." deprecate("Transformer2DModelOutput", "1.0.0", deprecation_message) super().__init__(*args, **kwargs) class Transformer2DModel(LegacyModelMixin, LegacyConfigMixin): """ A 2D Transformer model for image-like data. Parameters: num_attention_heads (`int`, *optional*, defaults to 16): The number of heads to use for multi-head attention. attention_head_dim (`int`, *optional*, defaults to 88): The number of channels in each head. in_channels (`int`, *optional*): The number of channels in the input and output (specify if the input is **continuous**). num_layers (`int`, *optional*, defaults to 1): The number of layers of Transformer blocks to use. dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use. cross_attention_dim (`int`, *optional*): The number of `encoder_hidden_states` dimensions to use. sample_size (`int`, *optional*): The width of the latent images (specify if the input is **discrete**). This is fixed during training since it is used to learn a number of position embeddings. num_vector_embeds (`int`, *optional*): The number of classes of the vector embeddings of the latent pixels (specify if the input is **discrete**). Includes the class for the masked latent pixel. activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to use in feed-forward. num_embeds_ada_norm ( `int`, *optional*): The number of diffusion steps used during training. Pass if at least one of the norm_layers is `AdaLayerNorm`. This is fixed during training since it is used to learn a number of embeddings that are added to the hidden states. During inference, you can denoise for up to but not more steps than `num_embeds_ada_norm`. attention_bias (`bool`, *optional*): Configure if the `TransformerBlocks` attention should contain a bias parameter. 
""" _supports_gradient_checkpointing = True _no_split_modules = ["BasicTransformerBlock"] @register_to_config def __init__( self, num_attention_heads: int = 16, attention_head_dim: int = 88, in_channels: Optional[int] = None, out_channels: Optional[int] = None, num_layers: int = 1, dropout: float = 0.0, norm_num_groups: int = 32, cross_attention_dim: Optional[int] = None, attention_bias: bool = False, sample_size: Optional[int] = None, num_vector_embeds: Optional[int] = None, patch_size: Optional[int] = None, activation_fn: str = "geglu", num_embeds_ada_norm: Optional[int] = None, use_linear_projection: bool = False, only_cross_attention: bool = False, double_self_attention: bool = False, upcast_attention: bool = False, norm_type: str = "layer_norm", # 'layer_norm', 'ada_norm', 'ada_norm_zero', 'ada_norm_single', 'ada_norm_continuous', 'layer_norm_i2vgen' norm_elementwise_affine: bool = True, norm_eps: float = 1e-5, attention_type: str = "default", caption_channels: int = None, interpolation_scale: float = None, use_additional_conditions: Optional[bool] = None, ): super().__init__() # Validate inputs. if patch_size is not None: if norm_type not in ["ada_norm", "ada_norm_zero", "ada_norm_single"]: raise NotImplementedError( f"Forward pass is not implemented when `patch_size` is not None and `norm_type` is '{norm_type}'." ) elif norm_type in ["ada_norm", "ada_norm_zero"] and num_embeds_ada_norm is None: raise ValueError( f"When using a `patch_size` and this `norm_type` ({norm_type}), `num_embeds_ada_norm` cannot be None." ) # 1. Transformer2DModel can process both standard continuous images of shape `(batch_size, num_channels, width, height)` as well as quantized image embeddings of shape `(batch_size, num_image_vectors)` # Define whether input is continuous or discrete depending on configuration self.is_input_continuous = (in_channels is not None) and (patch_size is None) self.is_input_vectorized = num_vector_embeds is not None self.is_input_patches = in_channels is not None and patch_size is not None if self.is_input_continuous and self.is_input_vectorized: raise ValueError( f"Cannot define both `in_channels`: {in_channels} and `num_vector_embeds`: {num_vector_embeds}. Make" " sure that either `in_channels` or `num_vector_embeds` is None." ) elif self.is_input_vectorized and self.is_input_patches: raise ValueError( f"Cannot define both `num_vector_embeds`: {num_vector_embeds} and `patch_size`: {patch_size}. Make" " sure that either `num_vector_embeds` or `num_patches` is None." ) elif not self.is_input_continuous and not self.is_input_vectorized and not self.is_input_patches: raise ValueError( f"Has to define `in_channels`: {in_channels}, `num_vector_embeds`: {num_vector_embeds}, or patch_size:" f" {patch_size}. Make sure that `in_channels`, `num_vector_embeds` or `num_patches` is not None." ) if norm_type == "layer_norm" and num_embeds_ada_norm is not None: deprecation_message = ( f"The configuration file of this model: {self.__class__} is outdated. `norm_type` is either not set or" " incorrectly set to `'layer_norm'`. Make sure to set `norm_type` to `'ada_norm'` in the config." " Please make sure to update the config accordingly as leaving `norm_type` might led to incorrect" " results in future versions. 
If you have downloaded this checkpoint from the Hugging Face Hub, it" " would be very nice if you could open a Pull request for the `transformer/config.json` file" ) deprecate("norm_type!=num_embeds_ada_norm", "1.0.0", deprecation_message, standard_warn=False) norm_type = "ada_norm" # Set some common variables used across the board. self.use_linear_projection = use_linear_projection self.interpolation_scale = interpolation_scale self.caption_channels = caption_channels self.num_attention_heads = num_attention_heads self.attention_head_dim = attention_head_dim self.inner_dim = self.config.num_attention_heads * self.config.attention_head_dim self.in_channels = in_channels self.out_channels = in_channels if out_channels is None else out_channels self.gradient_checkpointing = False if use_additional_conditions is None: if norm_type == "ada_norm_single" and sample_size == 128: use_additional_conditions = True else: use_additional_conditions = False self.use_additional_conditions = use_additional_conditions # 2. Initialize the right blocks. # These functions follow a common structure: # a. Initialize the input blocks. b. Initialize the transformer blocks. # c. Initialize the output blocks and other projection blocks when necessary. if self.is_input_continuous: self._init_continuous_input(norm_type=norm_type) elif self.is_input_vectorized: self._init_vectorized_inputs(norm_type=norm_type) elif self.is_input_patches: self._init_patched_inputs(norm_type=norm_type) def _init_continuous_input(self, norm_type): self.norm = torch.nn.GroupNorm( num_groups=self.config.norm_num_groups, num_channels=self.in_channels, eps=1e-6, affine=True ) if self.use_linear_projection: self.proj_in = torch.nn.Linear(self.in_channels, self.inner_dim) else: self.proj_in = torch.nn.Conv2d(self.in_channels, self.inner_dim, kernel_size=1, stride=1, padding=0) self.transformer_blocks = nn.ModuleList( [ BasicTransformerBlock( self.inner_dim, self.config.num_attention_heads, self.config.attention_head_dim, dropout=self.config.dropout, cross_attention_dim=self.config.cross_attention_dim, activation_fn=self.config.activation_fn, num_embeds_ada_norm=self.config.num_embeds_ada_norm, attention_bias=self.config.attention_bias, only_cross_attention=self.config.only_cross_attention, double_self_attention=self.config.double_self_attention, upcast_attention=self.config.upcast_attention, norm_type=norm_type, norm_elementwise_affine=self.config.norm_elementwise_affine, norm_eps=self.config.norm_eps, attention_type=self.config.attention_type, ) for _ in range(self.config.num_layers) ] ) if self.use_linear_projection: self.proj_out = torch.nn.Linear(self.inner_dim, self.out_channels) else: self.proj_out = torch.nn.Conv2d(self.inner_dim, self.out_channels, kernel_size=1, stride=1, padding=0) def _init_vectorized_inputs(self, norm_type): assert self.config.sample_size is not None, "Transformer2DModel over discrete input must provide sample_size" assert ( self.config.num_vector_embeds is not None ), "Transformer2DModel over discrete input must provide num_embed" self.height = self.config.sample_size self.width = self.config.sample_size self.num_latent_pixels = self.height * self.width self.latent_image_embedding = ImagePositionalEmbeddings( num_embed=self.config.num_vector_embeds, embed_dim=self.inner_dim, height=self.height, width=self.width ) self.transformer_blocks = nn.ModuleList( [ BasicTransformerBlock( self.inner_dim, self.config.num_attention_heads, self.config.attention_head_dim, dropout=self.config.dropout, 
cross_attention_dim=self.config.cross_attention_dim, activation_fn=self.config.activation_fn, num_embeds_ada_norm=self.config.num_embeds_ada_norm, attention_bias=self.config.attention_bias, only_cross_attention=self.config.only_cross_attention, double_self_attention=self.config.double_self_attention, upcast_attention=self.config.upcast_attention, norm_type=norm_type, norm_elementwise_affine=self.config.norm_elementwise_affine, norm_eps=self.config.norm_eps, attention_type=self.config.attention_type, ) for _ in range(self.config.num_layers) ] ) self.norm_out = nn.LayerNorm(self.inner_dim) self.out = nn.Linear(self.inner_dim, self.config.num_vector_embeds - 1) def _init_patched_inputs(self, norm_type): assert self.config.sample_size is not None, "Transformer2DModel over patched input must provide sample_size" self.height = self.config.sample_size self.width = self.config.sample_size self.patch_size = self.config.patch_size interpolation_scale = ( self.config.interpolation_scale if self.config.interpolation_scale is not None else max(self.config.sample_size // 64, 1) ) self.pos_embed = PatchEmbed( height=self.config.sample_size, width=self.config.sample_size, patch_size=self.config.patch_size, in_channels=self.in_channels, embed_dim=self.inner_dim, interpolation_scale=interpolation_scale, ) self.transformer_blocks = nn.ModuleList( [ BasicTransformerBlock( self.inner_dim, self.config.num_attention_heads, self.config.attention_head_dim, dropout=self.config.dropout, cross_attention_dim=self.config.cross_attention_dim, activation_fn=self.config.activation_fn, num_embeds_ada_norm=self.config.num_embeds_ada_norm, attention_bias=self.config.attention_bias, only_cross_attention=self.config.only_cross_attention, double_self_attention=self.config.double_self_attention, upcast_attention=self.config.upcast_attention, norm_type=norm_type, norm_elementwise_affine=self.config.norm_elementwise_affine, norm_eps=self.config.norm_eps, attention_type=self.config.attention_type, ) for _ in range(self.config.num_layers) ] ) if self.config.norm_type != "ada_norm_single": self.norm_out = nn.LayerNorm(self.inner_dim, elementwise_affine=False, eps=1e-6) self.proj_out_1 = nn.Linear(self.inner_dim, 2 * self.inner_dim) self.proj_out_2 = nn.Linear( self.inner_dim, self.config.patch_size * self.config.patch_size * self.out_channels ) elif self.config.norm_type == "ada_norm_single": self.norm_out = nn.LayerNorm(self.inner_dim, elementwise_affine=False, eps=1e-6) self.scale_shift_table = nn.Parameter(torch.randn(2, self.inner_dim) / self.inner_dim**0.5) self.proj_out = nn.Linear( self.inner_dim, self.config.patch_size * self.config.patch_size * self.out_channels ) # PixArt-Alpha blocks. 
self.adaln_single = None if self.config.norm_type == "ada_norm_single": # TODO(Sayak, PVP) clean this, for now we use sample size to determine whether to use # additional conditions until we find better name self.adaln_single = AdaLayerNormSingle( self.inner_dim, use_additional_conditions=self.use_additional_conditions ) self.caption_projection = None if self.caption_channels is not None: self.caption_projection = PixArtAlphaTextProjection( in_features=self.caption_channels, hidden_size=self.inner_dim ) def _set_gradient_checkpointing(self, module, value=False): if hasattr(module, "gradient_checkpointing"): module.gradient_checkpointing = value def forward( self, hidden_states: torch.Tensor, encoder_hidden_states: Optional[torch.Tensor] = None, timestep: Optional[torch.LongTensor] = None, added_cond_kwargs: Dict[str, torch.Tensor] = None, class_labels: Optional[torch.LongTensor] = None, cross_attention_kwargs: Dict[str, Any] = None, attention_mask: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, return_dict: bool = True, ): """ The [`Transformer2DModel`] forward method. Args: hidden_states (`torch.LongTensor` of shape `(batch size, num latent pixels)` if discrete, `torch.Tensor` of shape `(batch size, channel, height, width)` if continuous): Input `hidden_states`. encoder_hidden_states ( `torch.Tensor` of shape `(batch size, sequence len, embed dims)`, *optional*): Conditional embeddings for cross attention layer. If not given, cross-attention defaults to self-attention. timestep ( `torch.LongTensor`, *optional*): Used to indicate denoising step. Optional timestep to be applied as an embedding in `AdaLayerNorm`. class_labels ( `torch.LongTensor` of shape `(batch size, num classes)`, *optional*): Used to indicate class labels conditioning. Optional class labels to be applied as an embedding in `AdaLayerZeroNorm`. cross_attention_kwargs ( `Dict[str, Any]`, *optional*): A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under `self.processor` in [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). attention_mask ( `torch.Tensor`, *optional*): An attention mask of shape `(batch, key_tokens)` is applied to `encoder_hidden_states`. If `1` the mask is kept, otherwise if `0` it is discarded. Mask will be converted into a bias, which adds large negative values to the attention scores corresponding to "discard" tokens. encoder_attention_mask ( `torch.Tensor`, *optional*): Cross-attention mask applied to `encoder_hidden_states`. Two formats supported: * Mask `(batch, sequence_length)` True = keep, False = discard. * Bias `(batch, 1, sequence_length)` 0 = keep, -10000 = discard. If `ndim == 2`: will be interpreted as a mask, then converted into a bias consistent with the format above. This bias will be added to the cross-attention scores. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~models.unets.unet_2d_condition.UNet2DConditionOutput`] instead of a plain tuple. Returns: If `return_dict` is True, an [`~models.transformers.transformer_2d.Transformer2DModelOutput`] is returned, otherwise a `tuple` where the first element is the sample tensor. """ if cross_attention_kwargs is not None: if cross_attention_kwargs.get("scale", None) is not None: logger.warning("Passing `scale` to `cross_attention_kwargs` is deprecated. 
`scale` will be ignored.") # ensure attention_mask is a bias, and give it a singleton query_tokens dimension. # we may have done this conversion already, e.g. if we came here via UNet2DConditionModel#forward. # we can tell by counting dims; if ndim == 2: it's a mask rather than a bias. # expects mask of shape: # [batch, key_tokens] # adds singleton query_tokens dimension: # [batch, 1, key_tokens] # this helps to broadcast it as a bias over attention scores, which will be in one of the following shapes: # [batch, heads, query_tokens, key_tokens] (e.g. torch sdp attn) # [batch * heads, query_tokens, key_tokens] (e.g. xformers or classic attn) if attention_mask is not None and attention_mask.ndim == 2: # assume that mask is expressed as: # (1 = keep, 0 = discard) # convert mask into a bias that can be added to attention scores: # (keep = +0, discard = -10000.0) attention_mask = (1 - attention_mask.to(hidden_states.dtype)) * -10000.0 attention_mask = attention_mask.unsqueeze(1) # convert encoder_attention_mask to a bias the same way we do for attention_mask if encoder_attention_mask is not None and encoder_attention_mask.ndim == 2: encoder_attention_mask = (1 - encoder_attention_mask.to(hidden_states.dtype)) * -10000.0 encoder_attention_mask = encoder_attention_mask.unsqueeze(1) # 1. Input if self.is_input_continuous: batch_size, _, height, width = hidden_states.shape residual = hidden_states hidden_states, inner_dim = self._operate_on_continuous_inputs(hidden_states) elif self.is_input_vectorized: hidden_states = self.latent_image_embedding(hidden_states) elif self.is_input_patches: height, width = hidden_states.shape[-2] // self.patch_size, hidden_states.shape[-1] // self.patch_size hidden_states, encoder_hidden_states, timestep, embedded_timestep = self._operate_on_patched_inputs( hidden_states, encoder_hidden_states, timestep, added_cond_kwargs ) # 2. Blocks for block in self.transformer_blocks: if self.training and self.gradient_checkpointing: def create_custom_forward(module, return_dict=None): def custom_forward(*inputs): if return_dict is not None: return module(*inputs, return_dict=return_dict) else: return module(*inputs) return custom_forward ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {} hidden_states = torch.utils.checkpoint.checkpoint( create_custom_forward(block), hidden_states, attention_mask, encoder_hidden_states, encoder_attention_mask, timestep, cross_attention_kwargs, class_labels, **ckpt_kwargs, ) else: hidden_states = block( hidden_states, attention_mask=attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, timestep=timestep, cross_attention_kwargs=cross_attention_kwargs, class_labels=class_labels, ) # 3. 
Output if self.is_input_continuous: output = self._get_output_for_continuous_inputs( hidden_states=hidden_states, residual=residual, batch_size=batch_size, height=height, width=width, inner_dim=inner_dim, ) elif self.is_input_vectorized: output = self._get_output_for_vectorized_inputs(hidden_states) elif self.is_input_patches: output = self._get_output_for_patched_inputs( hidden_states=hidden_states, timestep=timestep, class_labels=class_labels, embedded_timestep=embedded_timestep, height=height, width=width, ) if not return_dict: return (output,) return Transformer2DModelOutput(sample=output) def _operate_on_continuous_inputs(self, hidden_states): batch, _, height, width = hidden_states.shape hidden_states = self.norm(hidden_states) if not self.use_linear_projection: hidden_states = self.proj_in(hidden_states) inner_dim = hidden_states.shape[1] hidden_states = hidden_states.permute(0, 2, 3, 1).reshape(batch, height * width, inner_dim) else: inner_dim = hidden_states.shape[1] hidden_states = hidden_states.permute(0, 2, 3, 1).reshape(batch, height * width, inner_dim) hidden_states = self.proj_in(hidden_states) return hidden_states, inner_dim def _operate_on_patched_inputs(self, hidden_states, encoder_hidden_states, timestep, added_cond_kwargs): batch_size = hidden_states.shape[0] hidden_states = self.pos_embed(hidden_states) embedded_timestep = None if self.adaln_single is not None: if self.use_additional_conditions and added_cond_kwargs is None: raise ValueError( "`added_cond_kwargs` cannot be None when using additional conditions for `adaln_single`." ) timestep, embedded_timestep = self.adaln_single( timestep, added_cond_kwargs, batch_size=batch_size, hidden_dtype=hidden_states.dtype ) if self.caption_projection is not None: encoder_hidden_states = self.caption_projection(encoder_hidden_states) encoder_hidden_states = encoder_hidden_states.view(batch_size, -1, hidden_states.shape[-1]) return hidden_states, encoder_hidden_states, timestep, embedded_timestep def _get_output_for_continuous_inputs(self, hidden_states, residual, batch_size, height, width, inner_dim): if not self.use_linear_projection: hidden_states = ( hidden_states.reshape(batch_size, height, width, inner_dim).permute(0, 3, 1, 2).contiguous() ) hidden_states = self.proj_out(hidden_states) else: hidden_states = self.proj_out(hidden_states) hidden_states = ( hidden_states.reshape(batch_size, height, width, inner_dim).permute(0, 3, 1, 2).contiguous() ) output = hidden_states + residual return output def _get_output_for_vectorized_inputs(self, hidden_states): hidden_states = self.norm_out(hidden_states) logits = self.out(hidden_states) # (batch, self.num_vector_embeds - 1, self.num_latent_pixels) logits = logits.permute(0, 2, 1) # log(p(x_0)) output = F.log_softmax(logits.double(), dim=1).float() return output def _get_output_for_patched_inputs( self, hidden_states, timestep, class_labels, embedded_timestep, height=None, width=None ): if self.config.norm_type != "ada_norm_single": conditioning = self.transformer_blocks[0].norm1.emb( timestep, class_labels, hidden_dtype=hidden_states.dtype ) shift, scale = self.proj_out_1(F.silu(conditioning)).chunk(2, dim=1) hidden_states = self.norm_out(hidden_states) * (1 + scale[:, None]) + shift[:, None] hidden_states = self.proj_out_2(hidden_states) elif self.config.norm_type == "ada_norm_single": shift, scale = (self.scale_shift_table[None] + embedded_timestep[:, None]).chunk(2, dim=1) hidden_states = self.norm_out(hidden_states) # Modulation hidden_states = hidden_states * (1 + scale) + 
shift hidden_states = self.proj_out(hidden_states) hidden_states = hidden_states.squeeze(1) # unpatchify if self.adaln_single is None: height = width = int(hidden_states.shape[1] ** 0.5) hidden_states = hidden_states.reshape( shape=(-1, height, width, self.patch_size, self.patch_size, self.out_channels) ) hidden_states = torch.einsum("nhwpqc->nchpwq", hidden_states) output = hidden_states.reshape( shape=(-1, self.out_channels, height * self.patch_size, width * self.patch_size) ) return output
diffusers/src/diffusers/models/transformers/transformer_2d.py/0
{ "file_path": "diffusers/src/diffusers/models/transformers/transformer_2d.py", "repo_id": "diffusers", "token_count": 13046 }
139
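`Transformer2DModel.forward` above converts 2D keep/discard attention masks into additive biases with a singleton query-token dimension before running the transformer blocks. A small self-contained sketch of that conversion, using illustrative tensor values:

import torch

# 2D mask over key tokens: 1 = keep, 0 = discard, shape (batch, key_tokens)
attention_mask = torch.tensor([[1, 1, 0, 0]])
hidden_states_dtype = torch.float32

# Same arithmetic as in forward(): keep -> 0.0, discard -> -10000.0,
# then add a singleton query_tokens dim so it broadcasts over attention scores.
bias = (1 - attention_mask.to(hidden_states_dtype)) * -10000.0
bias = bias.unsqueeze(1)  # (batch, 1, key_tokens)
print(bias.shape)         # torch.Size([1, 1, 4])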
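The patched-input output path (`_get_output_for_patched_inputs`) ends with an unpatchify step: reshape into a patch grid, permute with an einsum, and fold the patches back into spatial dimensions. A shape-only sketch of that bookkeeping with made-up sizes:

import torch

# Illustrative sizes: batch=2, an 8x8 grid of patches, patch_size=2, out_channels=4.
batch, height, width, patch_size, out_channels = 2, 8, 8, 2, 4
hidden_states = torch.randn(batch, height * width, patch_size * patch_size * out_channels)

# Same reshape / einsum / reshape sequence as in the model's unpatchify step.
hidden_states = hidden_states.reshape(-1, height, width, patch_size, patch_size, out_channels)
hidden_states = torch.einsum("nhwpqc->nchpwq", hidden_states)
output = hidden_states.reshape(-1, out_channels, height * patch_size, width * patch_size)
print(output.shape)  # torch.Size([2, 4, 16, 16])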
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from dataclasses import dataclass from typing import Any, Dict, Optional, Tuple, Union import torch import torch.nn as nn import torch.nn.functional as F import torch.utils.checkpoint from ...configuration_utils import ConfigMixin, FrozenDict, register_to_config from ...loaders import FromOriginalModelMixin, PeftAdapterMixin, UNet2DConditionLoadersMixin from ...utils import BaseOutput, deprecate, is_torch_version, logging from ...utils.torch_utils import apply_freeu from ..attention import BasicTransformerBlock from ..attention_processor import ( ADDED_KV_ATTENTION_PROCESSORS, CROSS_ATTENTION_PROCESSORS, Attention, AttentionProcessor, AttnAddedKVProcessor, AttnProcessor, AttnProcessor2_0, FusedAttnProcessor2_0, IPAdapterAttnProcessor, IPAdapterAttnProcessor2_0, ) from ..embeddings import TimestepEmbedding, Timesteps from ..modeling_utils import ModelMixin from ..resnet import Downsample2D, ResnetBlock2D, Upsample2D from ..transformers.dual_transformer_2d import DualTransformer2DModel from ..transformers.transformer_2d import Transformer2DModel from .unet_2d_blocks import UNetMidBlock2DCrossAttn from .unet_2d_condition import UNet2DConditionModel logger = logging.get_logger(__name__) # pylint: disable=invalid-name @dataclass class UNetMotionOutput(BaseOutput): """ The output of [`UNetMotionOutput`]. Args: sample (`torch.Tensor` of shape `(batch_size, num_channels, num_frames, height, width)`): The hidden states output conditioned on `encoder_hidden_states` input. Output of last layer of model. """ sample: torch.Tensor class AnimateDiffTransformer3D(nn.Module): """ A Transformer model for video-like data. Parameters: num_attention_heads (`int`, *optional*, defaults to 16): The number of heads to use for multi-head attention. attention_head_dim (`int`, *optional*, defaults to 88): The number of channels in each head. in_channels (`int`, *optional*): The number of channels in the input and output (specify if the input is **continuous**). num_layers (`int`, *optional*, defaults to 1): The number of layers of Transformer blocks to use. dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use. cross_attention_dim (`int`, *optional*): The number of `encoder_hidden_states` dimensions to use. attention_bias (`bool`, *optional*): Configure if the `TransformerBlock` attention should contain a bias parameter. sample_size (`int`, *optional*): The width of the latent images (specify if the input is **discrete**). This is fixed during training since it is used to learn a number of position embeddings. activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to use in feed-forward. See `diffusers.models.activations.get_activation` for supported activation functions. norm_elementwise_affine (`bool`, *optional*): Configure if the `TransformerBlock` should use learnable elementwise affine parameters for normalization. 
double_self_attention (`bool`, *optional*): Configure if each `TransformerBlock` should contain two self-attention layers. positional_embeddings: (`str`, *optional*): The type of positional embeddings to apply to the sequence input before passing use. num_positional_embeddings: (`int`, *optional*): The maximum length of the sequence over which to apply positional embeddings. """ def __init__( self, num_attention_heads: int = 16, attention_head_dim: int = 88, in_channels: Optional[int] = None, out_channels: Optional[int] = None, num_layers: int = 1, dropout: float = 0.0, norm_num_groups: int = 32, cross_attention_dim: Optional[int] = None, attention_bias: bool = False, sample_size: Optional[int] = None, activation_fn: str = "geglu", norm_elementwise_affine: bool = True, double_self_attention: bool = True, positional_embeddings: Optional[str] = None, num_positional_embeddings: Optional[int] = None, ): super().__init__() self.num_attention_heads = num_attention_heads self.attention_head_dim = attention_head_dim inner_dim = num_attention_heads * attention_head_dim self.in_channels = in_channels self.norm = nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True) self.proj_in = nn.Linear(in_channels, inner_dim) # 3. Define transformers blocks self.transformer_blocks = nn.ModuleList( [ BasicTransformerBlock( inner_dim, num_attention_heads, attention_head_dim, dropout=dropout, cross_attention_dim=cross_attention_dim, activation_fn=activation_fn, attention_bias=attention_bias, double_self_attention=double_self_attention, norm_elementwise_affine=norm_elementwise_affine, positional_embeddings=positional_embeddings, num_positional_embeddings=num_positional_embeddings, ) for _ in range(num_layers) ] ) self.proj_out = nn.Linear(inner_dim, in_channels) def forward( self, hidden_states: torch.Tensor, encoder_hidden_states: Optional[torch.LongTensor] = None, timestep: Optional[torch.LongTensor] = None, class_labels: Optional[torch.LongTensor] = None, num_frames: int = 1, cross_attention_kwargs: Optional[Dict[str, Any]] = None, ) -> torch.Tensor: """ The [`AnimateDiffTransformer3D`] forward method. Args: hidden_states (`torch.LongTensor` of shape `(batch size, num latent pixels)` if discrete, `torch.Tensor` of shape `(batch size, channel, height, width)` if continuous): Input hidden_states. encoder_hidden_states ( `torch.LongTensor` of shape `(batch size, encoder_hidden_states dim)`, *optional*): Conditional embeddings for cross attention layer. If not given, cross-attention defaults to self-attention. timestep ( `torch.LongTensor`, *optional*): Used to indicate denoising step. Optional timestep to be applied as an embedding in `AdaLayerNorm`. class_labels ( `torch.LongTensor` of shape `(batch size, num classes)`, *optional*): Used to indicate class labels conditioning. Optional class labels to be applied as an embedding in `AdaLayerZeroNorm`. num_frames (`int`, *optional*, defaults to 1): The number of frames to be processed per batch. This is used to reshape the hidden states. cross_attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under `self.processor` in [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). Returns: torch.Tensor: The output tensor. """ # 1. 
Input batch_frames, channel, height, width = hidden_states.shape batch_size = batch_frames // num_frames residual = hidden_states hidden_states = hidden_states[None, :].reshape(batch_size, num_frames, channel, height, width) hidden_states = hidden_states.permute(0, 2, 1, 3, 4) hidden_states = self.norm(hidden_states) hidden_states = hidden_states.permute(0, 3, 4, 2, 1).reshape(batch_size * height * width, num_frames, channel) hidden_states = self.proj_in(hidden_states) # 2. Blocks for block in self.transformer_blocks: hidden_states = block( hidden_states, encoder_hidden_states=encoder_hidden_states, timestep=timestep, cross_attention_kwargs=cross_attention_kwargs, class_labels=class_labels, ) # 3. Output hidden_states = self.proj_out(hidden_states) hidden_states = ( hidden_states[None, None, :] .reshape(batch_size, height, width, num_frames, channel) .permute(0, 3, 4, 1, 2) .contiguous() ) hidden_states = hidden_states.reshape(batch_frames, channel, height, width) output = hidden_states + residual return output class DownBlockMotion(nn.Module): def __init__( self, in_channels: int, out_channels: int, temb_channels: int, dropout: float = 0.0, num_layers: int = 1, resnet_eps: float = 1e-6, resnet_time_scale_shift: str = "default", resnet_act_fn: str = "swish", resnet_groups: int = 32, resnet_pre_norm: bool = True, output_scale_factor: float = 1.0, add_downsample: bool = True, downsample_padding: int = 1, temporal_num_attention_heads: Union[int, Tuple[int]] = 1, temporal_cross_attention_dim: Optional[int] = None, temporal_max_seq_length: int = 32, temporal_transformer_layers_per_block: Union[int, Tuple[int]] = 1, temporal_double_self_attention: bool = True, ): super().__init__() resnets = [] motion_modules = [] # support for variable transformer layers per temporal block if isinstance(temporal_transformer_layers_per_block, int): temporal_transformer_layers_per_block = (temporal_transformer_layers_per_block,) * num_layers elif len(temporal_transformer_layers_per_block) != num_layers: raise ValueError( f"`temporal_transformer_layers_per_block` must be an integer or a tuple of integers of length {num_layers}" ) # support for variable number of attention head per temporal layers if isinstance(temporal_num_attention_heads, int): temporal_num_attention_heads = (temporal_num_attention_heads,) * num_layers elif len(temporal_num_attention_heads) != num_layers: raise ValueError( f"`temporal_num_attention_heads` must be an integer or a tuple of integers of length {num_layers}" ) for i in range(num_layers): in_channels = in_channels if i == 0 else out_channels resnets.append( ResnetBlock2D( in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, ) ) motion_modules.append( AnimateDiffTransformer3D( num_attention_heads=temporal_num_attention_heads[i], in_channels=out_channels, num_layers=temporal_transformer_layers_per_block[i], norm_num_groups=resnet_groups, cross_attention_dim=temporal_cross_attention_dim, attention_bias=False, activation_fn="geglu", positional_embeddings="sinusoidal", num_positional_embeddings=temporal_max_seq_length, attention_head_dim=out_channels // temporal_num_attention_heads[i], double_self_attention=temporal_double_self_attention, ) ) self.resnets = nn.ModuleList(resnets) self.motion_modules = nn.ModuleList(motion_modules) if add_downsample: self.downsamplers = 
nn.ModuleList( [ Downsample2D( out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name="op", ) ] ) else: self.downsamplers = None self.gradient_checkpointing = False def forward( self, hidden_states: torch.Tensor, temb: Optional[torch.Tensor] = None, num_frames: int = 1, *args, **kwargs, ) -> Union[torch.Tensor, Tuple[torch.Tensor, ...]]: if len(args) > 0 or kwargs.get("scale", None) is not None: deprecation_message = "The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`." deprecate("scale", "1.0.0", deprecation_message) output_states = () blocks = zip(self.resnets, self.motion_modules) for resnet, motion_module in blocks: if self.training and self.gradient_checkpointing: def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs) return custom_forward if is_torch_version(">=", "1.11.0"): hidden_states = torch.utils.checkpoint.checkpoint( create_custom_forward(resnet), hidden_states, temb, use_reentrant=False, ) else: hidden_states = torch.utils.checkpoint.checkpoint( create_custom_forward(resnet), hidden_states, temb ) else: hidden_states = resnet(hidden_states, temb) hidden_states = motion_module(hidden_states, num_frames=num_frames) output_states = output_states + (hidden_states,) if self.downsamplers is not None: for downsampler in self.downsamplers: hidden_states = downsampler(hidden_states) output_states = output_states + (hidden_states,) return hidden_states, output_states class CrossAttnDownBlockMotion(nn.Module): def __init__( self, in_channels: int, out_channels: int, temb_channels: int, dropout: float = 0.0, num_layers: int = 1, transformer_layers_per_block: Union[int, Tuple[int]] = 1, resnet_eps: float = 1e-6, resnet_time_scale_shift: str = "default", resnet_act_fn: str = "swish", resnet_groups: int = 32, resnet_pre_norm: bool = True, num_attention_heads: int = 1, cross_attention_dim: int = 1280, output_scale_factor: float = 1.0, downsample_padding: int = 1, add_downsample: bool = True, dual_cross_attention: bool = False, use_linear_projection: bool = False, only_cross_attention: bool = False, upcast_attention: bool = False, attention_type: str = "default", temporal_cross_attention_dim: Optional[int] = None, temporal_num_attention_heads: int = 8, temporal_max_seq_length: int = 32, temporal_transformer_layers_per_block: Union[int, Tuple[int]] = 1, temporal_double_self_attention: bool = True, ): super().__init__() resnets = [] attentions = [] motion_modules = [] self.has_cross_attention = True self.num_attention_heads = num_attention_heads # support for variable transformer layers per block if isinstance(transformer_layers_per_block, int): transformer_layers_per_block = (transformer_layers_per_block,) * num_layers elif len(transformer_layers_per_block) != num_layers: raise ValueError( f"transformer_layers_per_block must be an integer or a list of integers of length {num_layers}" ) # support for variable transformer layers per temporal block if isinstance(temporal_transformer_layers_per_block, int): temporal_transformer_layers_per_block = (temporal_transformer_layers_per_block,) * num_layers elif len(temporal_transformer_layers_per_block) != num_layers: raise ValueError( f"temporal_transformer_layers_per_block must be an integer or a list of integers of length {num_layers}" ) for i in range(num_layers): in_channels = in_channels if i == 0 
else out_channels resnets.append( ResnetBlock2D( in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, ) ) if not dual_cross_attention: attentions.append( Transformer2DModel( num_attention_heads, out_channels // num_attention_heads, in_channels=out_channels, num_layers=transformer_layers_per_block[i], cross_attention_dim=cross_attention_dim, norm_num_groups=resnet_groups, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention, upcast_attention=upcast_attention, attention_type=attention_type, ) ) else: attentions.append( DualTransformer2DModel( num_attention_heads, out_channels // num_attention_heads, in_channels=out_channels, num_layers=1, cross_attention_dim=cross_attention_dim, norm_num_groups=resnet_groups, ) ) motion_modules.append( AnimateDiffTransformer3D( num_attention_heads=temporal_num_attention_heads, in_channels=out_channels, num_layers=temporal_transformer_layers_per_block[i], norm_num_groups=resnet_groups, cross_attention_dim=temporal_cross_attention_dim, attention_bias=False, activation_fn="geglu", positional_embeddings="sinusoidal", num_positional_embeddings=temporal_max_seq_length, attention_head_dim=out_channels // temporal_num_attention_heads, double_self_attention=temporal_double_self_attention, ) ) self.attentions = nn.ModuleList(attentions) self.resnets = nn.ModuleList(resnets) self.motion_modules = nn.ModuleList(motion_modules) if add_downsample: self.downsamplers = nn.ModuleList( [ Downsample2D( out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name="op", ) ] ) else: self.downsamplers = None self.gradient_checkpointing = False def forward( self, hidden_states: torch.Tensor, temb: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, num_frames: int = 1, encoder_attention_mask: Optional[torch.Tensor] = None, cross_attention_kwargs: Optional[Dict[str, Any]] = None, additional_residuals: Optional[torch.Tensor] = None, ): if cross_attention_kwargs is not None: if cross_attention_kwargs.get("scale", None) is not None: logger.warning("Passing `scale` to `cross_attention_kwargs` is deprecated. 
`scale` will be ignored.") output_states = () blocks = list(zip(self.resnets, self.attentions, self.motion_modules)) for i, (resnet, attn, motion_module) in enumerate(blocks): if self.training and self.gradient_checkpointing: def create_custom_forward(module, return_dict=None): def custom_forward(*inputs): if return_dict is not None: return module(*inputs, return_dict=return_dict) else: return module(*inputs) return custom_forward ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {} hidden_states = torch.utils.checkpoint.checkpoint( create_custom_forward(resnet), hidden_states, temb, **ckpt_kwargs, ) hidden_states = attn( hidden_states, encoder_hidden_states=encoder_hidden_states, cross_attention_kwargs=cross_attention_kwargs, attention_mask=attention_mask, encoder_attention_mask=encoder_attention_mask, return_dict=False, )[0] else: hidden_states = resnet(hidden_states, temb) hidden_states = attn( hidden_states, encoder_hidden_states=encoder_hidden_states, cross_attention_kwargs=cross_attention_kwargs, attention_mask=attention_mask, encoder_attention_mask=encoder_attention_mask, return_dict=False, )[0] hidden_states = motion_module( hidden_states, num_frames=num_frames, ) # apply additional residuals to the output of the last pair of resnet and attention blocks if i == len(blocks) - 1 and additional_residuals is not None: hidden_states = hidden_states + additional_residuals output_states = output_states + (hidden_states,) if self.downsamplers is not None: for downsampler in self.downsamplers: hidden_states = downsampler(hidden_states) output_states = output_states + (hidden_states,) return hidden_states, output_states class CrossAttnUpBlockMotion(nn.Module): def __init__( self, in_channels: int, out_channels: int, prev_output_channel: int, temb_channels: int, resolution_idx: Optional[int] = None, dropout: float = 0.0, num_layers: int = 1, transformer_layers_per_block: Union[int, Tuple[int]] = 1, resnet_eps: float = 1e-6, resnet_time_scale_shift: str = "default", resnet_act_fn: str = "swish", resnet_groups: int = 32, resnet_pre_norm: bool = True, num_attention_heads: int = 1, cross_attention_dim: int = 1280, output_scale_factor: float = 1.0, add_upsample: bool = True, dual_cross_attention: bool = False, use_linear_projection: bool = False, only_cross_attention: bool = False, upcast_attention: bool = False, attention_type: str = "default", temporal_cross_attention_dim: Optional[int] = None, temporal_num_attention_heads: int = 8, temporal_max_seq_length: int = 32, temporal_transformer_layers_per_block: Union[int, Tuple[int]] = 1, ): super().__init__() resnets = [] attentions = [] motion_modules = [] self.has_cross_attention = True self.num_attention_heads = num_attention_heads # support for variable transformer layers per block if isinstance(transformer_layers_per_block, int): transformer_layers_per_block = (transformer_layers_per_block,) * num_layers elif len(transformer_layers_per_block) != num_layers: raise ValueError( f"transformer_layers_per_block must be an integer or a list of integers of length {num_layers}, got {len(transformer_layers_per_block)}" ) # support for variable transformer layers per temporal block if isinstance(temporal_transformer_layers_per_block, int): temporal_transformer_layers_per_block = (temporal_transformer_layers_per_block,) * num_layers elif len(temporal_transformer_layers_per_block) != num_layers: raise ValueError( f"temporal_transformer_layers_per_block must be an integer or a list of integers of length 
{num_layers}, got {len(temporal_transformer_layers_per_block)}" ) for i in range(num_layers): res_skip_channels = in_channels if (i == num_layers - 1) else out_channels resnet_in_channels = prev_output_channel if i == 0 else out_channels resnets.append( ResnetBlock2D( in_channels=resnet_in_channels + res_skip_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, ) ) if not dual_cross_attention: attentions.append( Transformer2DModel( num_attention_heads, out_channels // num_attention_heads, in_channels=out_channels, num_layers=transformer_layers_per_block[i], cross_attention_dim=cross_attention_dim, norm_num_groups=resnet_groups, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention, upcast_attention=upcast_attention, attention_type=attention_type, ) ) else: attentions.append( DualTransformer2DModel( num_attention_heads, out_channels // num_attention_heads, in_channels=out_channels, num_layers=1, cross_attention_dim=cross_attention_dim, norm_num_groups=resnet_groups, ) ) motion_modules.append( AnimateDiffTransformer3D( num_attention_heads=temporal_num_attention_heads, in_channels=out_channels, num_layers=temporal_transformer_layers_per_block[i], norm_num_groups=resnet_groups, cross_attention_dim=temporal_cross_attention_dim, attention_bias=False, activation_fn="geglu", positional_embeddings="sinusoidal", num_positional_embeddings=temporal_max_seq_length, attention_head_dim=out_channels // temporal_num_attention_heads, ) ) self.attentions = nn.ModuleList(attentions) self.resnets = nn.ModuleList(resnets) self.motion_modules = nn.ModuleList(motion_modules) if add_upsample: self.upsamplers = nn.ModuleList([Upsample2D(out_channels, use_conv=True, out_channels=out_channels)]) else: self.upsamplers = None self.gradient_checkpointing = False self.resolution_idx = resolution_idx def forward( self, hidden_states: torch.Tensor, res_hidden_states_tuple: Tuple[torch.Tensor, ...], temb: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, cross_attention_kwargs: Optional[Dict[str, Any]] = None, upsample_size: Optional[int] = None, attention_mask: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, num_frames: int = 1, ) -> torch.Tensor: if cross_attention_kwargs is not None: if cross_attention_kwargs.get("scale", None) is not None: logger.warning("Passing `scale` to `cross_attention_kwargs` is deprecated. 
`scale` will be ignored.") is_freeu_enabled = ( getattr(self, "s1", None) and getattr(self, "s2", None) and getattr(self, "b1", None) and getattr(self, "b2", None) ) blocks = zip(self.resnets, self.attentions, self.motion_modules) for resnet, attn, motion_module in blocks: # pop res hidden states res_hidden_states = res_hidden_states_tuple[-1] res_hidden_states_tuple = res_hidden_states_tuple[:-1] # FreeU: Only operate on the first two stages if is_freeu_enabled: hidden_states, res_hidden_states = apply_freeu( self.resolution_idx, hidden_states, res_hidden_states, s1=self.s1, s2=self.s2, b1=self.b1, b2=self.b2, ) hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1) if self.training and self.gradient_checkpointing: def create_custom_forward(module, return_dict=None): def custom_forward(*inputs): if return_dict is not None: return module(*inputs, return_dict=return_dict) else: return module(*inputs) return custom_forward ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {} hidden_states = torch.utils.checkpoint.checkpoint( create_custom_forward(resnet), hidden_states, temb, **ckpt_kwargs, ) hidden_states = attn( hidden_states, encoder_hidden_states=encoder_hidden_states, cross_attention_kwargs=cross_attention_kwargs, attention_mask=attention_mask, encoder_attention_mask=encoder_attention_mask, return_dict=False, )[0] else: hidden_states = resnet(hidden_states, temb) hidden_states = attn( hidden_states, encoder_hidden_states=encoder_hidden_states, cross_attention_kwargs=cross_attention_kwargs, attention_mask=attention_mask, encoder_attention_mask=encoder_attention_mask, return_dict=False, )[0] hidden_states = motion_module( hidden_states, num_frames=num_frames, ) if self.upsamplers is not None: for upsampler in self.upsamplers: hidden_states = upsampler(hidden_states, upsample_size) return hidden_states class UpBlockMotion(nn.Module): def __init__( self, in_channels: int, prev_output_channel: int, out_channels: int, temb_channels: int, resolution_idx: Optional[int] = None, dropout: float = 0.0, num_layers: int = 1, resnet_eps: float = 1e-6, resnet_time_scale_shift: str = "default", resnet_act_fn: str = "swish", resnet_groups: int = 32, resnet_pre_norm: bool = True, output_scale_factor: float = 1.0, add_upsample: bool = True, temporal_cross_attention_dim: Optional[int] = None, temporal_num_attention_heads: int = 8, temporal_max_seq_length: int = 32, temporal_transformer_layers_per_block: Union[int, Tuple[int]] = 1, ): super().__init__() resnets = [] motion_modules = [] # support for variable transformer layers per temporal block if isinstance(temporal_transformer_layers_per_block, int): temporal_transformer_layers_per_block = (temporal_transformer_layers_per_block,) * num_layers elif len(temporal_transformer_layers_per_block) != num_layers: raise ValueError( f"temporal_transformer_layers_per_block must be an integer or a list of integers of length {num_layers}" ) for i in range(num_layers): res_skip_channels = in_channels if (i == num_layers - 1) else out_channels resnet_in_channels = prev_output_channel if i == 0 else out_channels resnets.append( ResnetBlock2D( in_channels=resnet_in_channels + res_skip_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, ) ) motion_modules.append( AnimateDiffTransformer3D( 
num_attention_heads=temporal_num_attention_heads, in_channels=out_channels, num_layers=temporal_transformer_layers_per_block[i], norm_num_groups=resnet_groups, cross_attention_dim=temporal_cross_attention_dim, attention_bias=False, activation_fn="geglu", positional_embeddings="sinusoidal", num_positional_embeddings=temporal_max_seq_length, attention_head_dim=out_channels // temporal_num_attention_heads, ) ) self.resnets = nn.ModuleList(resnets) self.motion_modules = nn.ModuleList(motion_modules) if add_upsample: self.upsamplers = nn.ModuleList([Upsample2D(out_channels, use_conv=True, out_channels=out_channels)]) else: self.upsamplers = None self.gradient_checkpointing = False self.resolution_idx = resolution_idx def forward( self, hidden_states: torch.Tensor, res_hidden_states_tuple: Tuple[torch.Tensor, ...], temb: Optional[torch.Tensor] = None, upsample_size=None, num_frames: int = 1, *args, **kwargs, ) -> torch.Tensor: if len(args) > 0 or kwargs.get("scale", None) is not None: deprecation_message = "The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`." deprecate("scale", "1.0.0", deprecation_message) is_freeu_enabled = ( getattr(self, "s1", None) and getattr(self, "s2", None) and getattr(self, "b1", None) and getattr(self, "b2", None) ) blocks = zip(self.resnets, self.motion_modules) for resnet, motion_module in blocks: # pop res hidden states res_hidden_states = res_hidden_states_tuple[-1] res_hidden_states_tuple = res_hidden_states_tuple[:-1] # FreeU: Only operate on the first two stages if is_freeu_enabled: hidden_states, res_hidden_states = apply_freeu( self.resolution_idx, hidden_states, res_hidden_states, s1=self.s1, s2=self.s2, b1=self.b1, b2=self.b2, ) hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1) if self.training and self.gradient_checkpointing: def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs) return custom_forward if is_torch_version(">=", "1.11.0"): hidden_states = torch.utils.checkpoint.checkpoint( create_custom_forward(resnet), hidden_states, temb, use_reentrant=False, ) else: hidden_states = torch.utils.checkpoint.checkpoint( create_custom_forward(resnet), hidden_states, temb ) else: hidden_states = resnet(hidden_states, temb) hidden_states = motion_module(hidden_states, num_frames=num_frames) if self.upsamplers is not None: for upsampler in self.upsamplers: hidden_states = upsampler(hidden_states, upsample_size) return hidden_states class UNetMidBlockCrossAttnMotion(nn.Module): def __init__( self, in_channels: int, temb_channels: int, dropout: float = 0.0, num_layers: int = 1, transformer_layers_per_block: Union[int, Tuple[int]] = 1, resnet_eps: float = 1e-6, resnet_time_scale_shift: str = "default", resnet_act_fn: str = "swish", resnet_groups: int = 32, resnet_pre_norm: bool = True, num_attention_heads: int = 1, output_scale_factor: float = 1.0, cross_attention_dim: int = 1280, dual_cross_attention: bool = False, use_linear_projection: bool = False, upcast_attention: bool = False, attention_type: str = "default", temporal_num_attention_heads: int = 1, temporal_cross_attention_dim: Optional[int] = None, temporal_max_seq_length: int = 32, temporal_transformer_layers_per_block: Union[int, Tuple[int]] = 1, ): super().__init__() self.has_cross_attention = True self.num_attention_heads = num_attention_heads resnet_groups = 
resnet_groups if resnet_groups is not None else min(in_channels // 4, 32) # support for variable transformer layers per block if isinstance(transformer_layers_per_block, int): transformer_layers_per_block = (transformer_layers_per_block,) * num_layers elif len(transformer_layers_per_block) != num_layers: raise ValueError( f"`transformer_layers_per_block` should be an integer or a list of integers of length {num_layers}." ) # support for variable transformer layers per temporal block if isinstance(temporal_transformer_layers_per_block, int): temporal_transformer_layers_per_block = (temporal_transformer_layers_per_block,) * num_layers elif len(temporal_transformer_layers_per_block) != num_layers: raise ValueError( f"`temporal_transformer_layers_per_block` should be an integer or a list of integers of length {num_layers}." ) # there is always at least one resnet resnets = [ ResnetBlock2D( in_channels=in_channels, out_channels=in_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, ) ] attentions = [] motion_modules = [] for i in range(num_layers): if not dual_cross_attention: attentions.append( Transformer2DModel( num_attention_heads, in_channels // num_attention_heads, in_channels=in_channels, num_layers=transformer_layers_per_block[i], cross_attention_dim=cross_attention_dim, norm_num_groups=resnet_groups, use_linear_projection=use_linear_projection, upcast_attention=upcast_attention, attention_type=attention_type, ) ) else: attentions.append( DualTransformer2DModel( num_attention_heads, in_channels // num_attention_heads, in_channels=in_channels, num_layers=1, cross_attention_dim=cross_attention_dim, norm_num_groups=resnet_groups, ) ) resnets.append( ResnetBlock2D( in_channels=in_channels, out_channels=in_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, ) ) motion_modules.append( AnimateDiffTransformer3D( num_attention_heads=temporal_num_attention_heads, attention_head_dim=in_channels // temporal_num_attention_heads, in_channels=in_channels, num_layers=temporal_transformer_layers_per_block[i], norm_num_groups=resnet_groups, cross_attention_dim=temporal_cross_attention_dim, attention_bias=False, positional_embeddings="sinusoidal", num_positional_embeddings=temporal_max_seq_length, activation_fn="geglu", ) ) self.attentions = nn.ModuleList(attentions) self.resnets = nn.ModuleList(resnets) self.motion_modules = nn.ModuleList(motion_modules) self.gradient_checkpointing = False def forward( self, hidden_states: torch.Tensor, temb: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, cross_attention_kwargs: Optional[Dict[str, Any]] = None, encoder_attention_mask: Optional[torch.Tensor] = None, num_frames: int = 1, ) -> torch.Tensor: if cross_attention_kwargs is not None: if cross_attention_kwargs.get("scale", None) is not None: logger.warning("Passing `scale` to `cross_attention_kwargs` is deprecated. 
`scale` will be ignored.") hidden_states = self.resnets[0](hidden_states, temb) blocks = zip(self.attentions, self.resnets[1:], self.motion_modules) for attn, resnet, motion_module in blocks: if self.training and self.gradient_checkpointing: def create_custom_forward(module, return_dict=None): def custom_forward(*inputs): if return_dict is not None: return module(*inputs, return_dict=return_dict) else: return module(*inputs) return custom_forward ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {} hidden_states = attn( hidden_states, encoder_hidden_states=encoder_hidden_states, cross_attention_kwargs=cross_attention_kwargs, attention_mask=attention_mask, encoder_attention_mask=encoder_attention_mask, return_dict=False, )[0] hidden_states = torch.utils.checkpoint.checkpoint( create_custom_forward(motion_module), hidden_states, temb, **ckpt_kwargs, ) hidden_states = torch.utils.checkpoint.checkpoint( create_custom_forward(resnet), hidden_states, temb, **ckpt_kwargs, ) else: hidden_states = attn( hidden_states, encoder_hidden_states=encoder_hidden_states, cross_attention_kwargs=cross_attention_kwargs, attention_mask=attention_mask, encoder_attention_mask=encoder_attention_mask, return_dict=False, )[0] hidden_states = motion_module( hidden_states, num_frames=num_frames, ) hidden_states = resnet(hidden_states, temb) return hidden_states class MotionModules(nn.Module): def __init__( self, in_channels: int, layers_per_block: int = 2, transformer_layers_per_block: Union[int, Tuple[int]] = 8, num_attention_heads: Union[int, Tuple[int]] = 8, attention_bias: bool = False, cross_attention_dim: Optional[int] = None, activation_fn: str = "geglu", norm_num_groups: int = 32, max_seq_length: int = 32, ): super().__init__() self.motion_modules = nn.ModuleList([]) if isinstance(transformer_layers_per_block, int): transformer_layers_per_block = (transformer_layers_per_block,) * layers_per_block elif len(transformer_layers_per_block) != layers_per_block: raise ValueError( f"The number of transformer layers per block must match the number of layers per block, " f"got {layers_per_block} and {len(transformer_layers_per_block)}" ) for i in range(layers_per_block): self.motion_modules.append( AnimateDiffTransformer3D( in_channels=in_channels, num_layers=transformer_layers_per_block[i], norm_num_groups=norm_num_groups, cross_attention_dim=cross_attention_dim, activation_fn=activation_fn, attention_bias=attention_bias, num_attention_heads=num_attention_heads, attention_head_dim=in_channels // num_attention_heads, positional_embeddings="sinusoidal", num_positional_embeddings=max_seq_length, ) ) class MotionAdapter(ModelMixin, ConfigMixin, FromOriginalModelMixin): @register_to_config def __init__( self, block_out_channels: Tuple[int, ...] = (320, 640, 1280, 1280), motion_layers_per_block: Union[int, Tuple[int]] = 2, motion_transformer_layers_per_block: Union[int, Tuple[int], Tuple[Tuple[int]]] = 1, motion_mid_block_layers_per_block: int = 1, motion_transformer_layers_per_mid_block: Union[int, Tuple[int]] = 1, motion_num_attention_heads: Union[int, Tuple[int]] = 8, motion_norm_num_groups: int = 32, motion_max_seq_length: int = 32, use_motion_mid_block: bool = True, conv_in_channels: Optional[int] = None, ): """Container to store AnimateDiff Motion Modules Args: block_out_channels (`Tuple[int]`, *optional*, defaults to `(320, 640, 1280, 1280)`): The tuple of output channels for each UNet block. 
motion_layers_per_block (`int` or `Tuple[int]`, *optional*, defaults to 2): The number of motion layers per UNet block. motion_transformer_layers_per_block (`int`, `Tuple[int]`, or `Tuple[Tuple[int]]`, *optional*, defaults to 1): The number of transformer layers to use in each motion layer in each block. motion_mid_block_layers_per_block (`int`, *optional*, defaults to 1): The number of motion layers in the middle UNet block. motion_transformer_layers_per_mid_block (`int` or `Tuple[int]`, *optional*, defaults to 1): The number of transformer layers to use in each motion layer in the middle block. motion_num_attention_heads (`int` or `Tuple[int]`, *optional*, defaults to 8): The number of heads to use in each attention layer of the motion module. motion_norm_num_groups (`int`, *optional*, defaults to 32): The number of groups to use in each group normalization layer of the motion module. motion_max_seq_length (`int`, *optional*, defaults to 32): The maximum sequence length to use in the motion module. use_motion_mid_block (`bool`, *optional*, defaults to True): Whether to use a motion module in the middle of the UNet. """ super().__init__() down_blocks = [] up_blocks = [] if isinstance(motion_layers_per_block, int): motion_layers_per_block = (motion_layers_per_block,) * len(block_out_channels) elif len(motion_layers_per_block) != len(block_out_channels): raise ValueError( f"The number of motion layers per block must match the number of blocks, " f"got {len(block_out_channels)} and {len(motion_layers_per_block)}" ) if isinstance(motion_transformer_layers_per_block, int): motion_transformer_layers_per_block = (motion_transformer_layers_per_block,) * len(block_out_channels) if isinstance(motion_transformer_layers_per_mid_block, int): motion_transformer_layers_per_mid_block = ( motion_transformer_layers_per_mid_block, ) * motion_mid_block_layers_per_block elif len(motion_transformer_layers_per_mid_block) != motion_mid_block_layers_per_block: raise ValueError( f"The number of layers per mid block ({motion_mid_block_layers_per_block}) " f"must match the length of motion_transformer_layers_per_mid_block ({len(motion_transformer_layers_per_mid_block)})" ) if isinstance(motion_num_attention_heads, int): motion_num_attention_heads = (motion_num_attention_heads,) * len(block_out_channels) elif len(motion_num_attention_heads) != len(block_out_channels): raise ValueError( f"The length of the attention head number tuple in the motion module must match the " f"number of block, got {len(motion_num_attention_heads)} and {len(block_out_channels)}" ) if conv_in_channels: # input self.conv_in = nn.Conv2d(conv_in_channels, block_out_channels[0], kernel_size=3, padding=1) else: self.conv_in = None for i, channel in enumerate(block_out_channels): output_channel = block_out_channels[i] down_blocks.append( MotionModules( in_channels=output_channel, norm_num_groups=motion_norm_num_groups, cross_attention_dim=None, activation_fn="geglu", attention_bias=False, num_attention_heads=motion_num_attention_heads[i], max_seq_length=motion_max_seq_length, layers_per_block=motion_layers_per_block[i], transformer_layers_per_block=motion_transformer_layers_per_block[i], ) ) if use_motion_mid_block: self.mid_block = MotionModules( in_channels=block_out_channels[-1], norm_num_groups=motion_norm_num_groups, cross_attention_dim=None, activation_fn="geglu", attention_bias=False, num_attention_heads=motion_num_attention_heads[-1], max_seq_length=motion_max_seq_length, layers_per_block=motion_mid_block_layers_per_block, 
transformer_layers_per_block=motion_transformer_layers_per_mid_block, ) else: self.mid_block = None reversed_block_out_channels = list(reversed(block_out_channels)) output_channel = reversed_block_out_channels[0] reversed_motion_layers_per_block = list(reversed(motion_layers_per_block)) reversed_motion_transformer_layers_per_block = list(reversed(motion_transformer_layers_per_block)) reversed_motion_num_attention_heads = list(reversed(motion_num_attention_heads)) for i, channel in enumerate(reversed_block_out_channels): output_channel = reversed_block_out_channels[i] up_blocks.append( MotionModules( in_channels=output_channel, norm_num_groups=motion_norm_num_groups, cross_attention_dim=None, activation_fn="geglu", attention_bias=False, num_attention_heads=reversed_motion_num_attention_heads[i], max_seq_length=motion_max_seq_length, layers_per_block=reversed_motion_layers_per_block[i] + 1, transformer_layers_per_block=reversed_motion_transformer_layers_per_block[i], ) ) self.down_blocks = nn.ModuleList(down_blocks) self.up_blocks = nn.ModuleList(up_blocks) def forward(self, sample): pass class UNetMotionModel(ModelMixin, ConfigMixin, UNet2DConditionLoadersMixin, PeftAdapterMixin): r""" A modified conditional 2D UNet model that takes a noisy sample, conditional state, and a timestep and returns a sample shaped output. This model inherits from [`ModelMixin`]. Check the superclass documentation for it's generic methods implemented for all models (such as downloading or saving). """ _supports_gradient_checkpointing = True @register_to_config def __init__( self, sample_size: Optional[int] = None, in_channels: int = 4, out_channels: int = 4, down_block_types: Tuple[str, ...] = ( "CrossAttnDownBlockMotion", "CrossAttnDownBlockMotion", "CrossAttnDownBlockMotion", "DownBlockMotion", ), up_block_types: Tuple[str, ...] = ( "UpBlockMotion", "CrossAttnUpBlockMotion", "CrossAttnUpBlockMotion", "CrossAttnUpBlockMotion", ), block_out_channels: Tuple[int, ...] 
= (320, 640, 1280, 1280), layers_per_block: Union[int, Tuple[int]] = 2, downsample_padding: int = 1, mid_block_scale_factor: float = 1, act_fn: str = "silu", norm_num_groups: int = 32, norm_eps: float = 1e-5, cross_attention_dim: int = 1280, transformer_layers_per_block: Union[int, Tuple[int], Tuple[Tuple]] = 1, reverse_transformer_layers_per_block: Optional[Union[int, Tuple[int], Tuple[Tuple]]] = None, temporal_transformer_layers_per_block: Union[int, Tuple[int], Tuple[Tuple]] = 1, reverse_temporal_transformer_layers_per_block: Optional[Union[int, Tuple[int], Tuple[Tuple]]] = None, transformer_layers_per_mid_block: Optional[Union[int, Tuple[int]]] = None, temporal_transformer_layers_per_mid_block: Optional[Union[int, Tuple[int]]] = 1, use_linear_projection: bool = False, num_attention_heads: Union[int, Tuple[int, ...]] = 8, motion_max_seq_length: int = 32, motion_num_attention_heads: Union[int, Tuple[int, ...]] = 8, reverse_motion_num_attention_heads: Optional[Union[int, Tuple[int, ...], Tuple[Tuple[int, ...], ...]]] = None, use_motion_mid_block: bool = True, mid_block_layers: int = 1, encoder_hid_dim: Optional[int] = None, encoder_hid_dim_type: Optional[str] = None, addition_embed_type: Optional[str] = None, addition_time_embed_dim: Optional[int] = None, projection_class_embeddings_input_dim: Optional[int] = None, time_cond_proj_dim: Optional[int] = None, ): super().__init__() self.sample_size = sample_size # Check inputs if len(down_block_types) != len(up_block_types): raise ValueError( f"Must provide the same number of `down_block_types` as `up_block_types`. `down_block_types`: {down_block_types}. `up_block_types`: {up_block_types}." ) if len(block_out_channels) != len(down_block_types): raise ValueError( f"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}." ) if not isinstance(num_attention_heads, int) and len(num_attention_heads) != len(down_block_types): raise ValueError( f"Must provide the same number of `num_attention_heads` as `down_block_types`. `num_attention_heads`: {num_attention_heads}. `down_block_types`: {down_block_types}." ) if isinstance(cross_attention_dim, list) and len(cross_attention_dim) != len(down_block_types): raise ValueError( f"Must provide the same number of `cross_attention_dim` as `down_block_types`. `cross_attention_dim`: {cross_attention_dim}. `down_block_types`: {down_block_types}." ) if not isinstance(layers_per_block, int) and len(layers_per_block) != len(down_block_types): raise ValueError( f"Must provide the same number of `layers_per_block` as `down_block_types`. `layers_per_block`: {layers_per_block}. `down_block_types`: {down_block_types}." ) if isinstance(transformer_layers_per_block, list) and reverse_transformer_layers_per_block is None: for layer_number_per_block in transformer_layers_per_block: if isinstance(layer_number_per_block, list): raise ValueError("Must provide 'reverse_transformer_layers_per_block` if using asymmetrical UNet.") if ( isinstance(temporal_transformer_layers_per_block, list) and reverse_temporal_transformer_layers_per_block is None ): for layer_number_per_block in temporal_transformer_layers_per_block: if isinstance(layer_number_per_block, list): raise ValueError( "Must provide 'reverse_temporal_transformer_layers_per_block` if using asymmetrical motion module in UNet." 
) # input conv_in_kernel = 3 conv_out_kernel = 3 conv_in_padding = (conv_in_kernel - 1) // 2 self.conv_in = nn.Conv2d( in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding ) # time time_embed_dim = block_out_channels[0] * 4 self.time_proj = Timesteps(block_out_channels[0], True, 0) timestep_input_dim = block_out_channels[0] self.time_embedding = TimestepEmbedding( timestep_input_dim, time_embed_dim, act_fn=act_fn, cond_proj_dim=time_cond_proj_dim ) if encoder_hid_dim_type is None: self.encoder_hid_proj = None if addition_embed_type == "text_time": self.add_time_proj = Timesteps(addition_time_embed_dim, True, 0) self.add_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim) # class embedding self.down_blocks = nn.ModuleList([]) self.up_blocks = nn.ModuleList([]) if isinstance(num_attention_heads, int): num_attention_heads = (num_attention_heads,) * len(down_block_types) if isinstance(cross_attention_dim, int): cross_attention_dim = (cross_attention_dim,) * len(down_block_types) if isinstance(layers_per_block, int): layers_per_block = [layers_per_block] * len(down_block_types) if isinstance(transformer_layers_per_block, int): transformer_layers_per_block = [transformer_layers_per_block] * len(down_block_types) if isinstance(reverse_transformer_layers_per_block, int): reverse_transformer_layers_per_block = [reverse_transformer_layers_per_block] * len(down_block_types) if isinstance(temporal_transformer_layers_per_block, int): temporal_transformer_layers_per_block = [temporal_transformer_layers_per_block] * len(down_block_types) if isinstance(reverse_temporal_transformer_layers_per_block, int): reverse_temporal_transformer_layers_per_block = [reverse_temporal_transformer_layers_per_block] * len( down_block_types ) if isinstance(motion_num_attention_heads, int): motion_num_attention_heads = (motion_num_attention_heads,) * len(down_block_types) # down output_channel = block_out_channels[0] for i, down_block_type in enumerate(down_block_types): input_channel = output_channel output_channel = block_out_channels[i] is_final_block = i == len(block_out_channels) - 1 if down_block_type == "CrossAttnDownBlockMotion": down_block = CrossAttnDownBlockMotion( in_channels=input_channel, out_channels=output_channel, temb_channels=time_embed_dim, num_layers=layers_per_block[i], transformer_layers_per_block=transformer_layers_per_block[i], resnet_eps=norm_eps, resnet_act_fn=act_fn, resnet_groups=norm_num_groups, num_attention_heads=num_attention_heads[i], cross_attention_dim=cross_attention_dim[i], downsample_padding=downsample_padding, add_downsample=not is_final_block, use_linear_projection=use_linear_projection, temporal_num_attention_heads=motion_num_attention_heads[i], temporal_max_seq_length=motion_max_seq_length, temporal_transformer_layers_per_block=temporal_transformer_layers_per_block[i], ) elif down_block_type == "DownBlockMotion": down_block = DownBlockMotion( in_channels=input_channel, out_channels=output_channel, temb_channels=time_embed_dim, num_layers=layers_per_block[i], resnet_eps=norm_eps, resnet_act_fn=act_fn, resnet_groups=norm_num_groups, add_downsample=not is_final_block, downsample_padding=downsample_padding, temporal_num_attention_heads=motion_num_attention_heads[i], temporal_max_seq_length=motion_max_seq_length, temporal_transformer_layers_per_block=temporal_transformer_layers_per_block[i], ) else: raise ValueError( "Invalid `down_block_type` encountered. 
Must be one of `CrossAttnDownBlockMotion` or `DownBlockMotion`" ) self.down_blocks.append(down_block) # mid if transformer_layers_per_mid_block is None: transformer_layers_per_mid_block = ( transformer_layers_per_block[-1] if isinstance(transformer_layers_per_block[-1], int) else 1 ) if use_motion_mid_block: self.mid_block = UNetMidBlockCrossAttnMotion( in_channels=block_out_channels[-1], temb_channels=time_embed_dim, resnet_eps=norm_eps, resnet_act_fn=act_fn, output_scale_factor=mid_block_scale_factor, cross_attention_dim=cross_attention_dim[-1], num_attention_heads=num_attention_heads[-1], resnet_groups=norm_num_groups, dual_cross_attention=False, use_linear_projection=use_linear_projection, num_layers=mid_block_layers, temporal_num_attention_heads=motion_num_attention_heads[-1], temporal_max_seq_length=motion_max_seq_length, transformer_layers_per_block=transformer_layers_per_mid_block, temporal_transformer_layers_per_block=temporal_transformer_layers_per_mid_block, ) else: self.mid_block = UNetMidBlock2DCrossAttn( in_channels=block_out_channels[-1], temb_channels=time_embed_dim, resnet_eps=norm_eps, resnet_act_fn=act_fn, output_scale_factor=mid_block_scale_factor, cross_attention_dim=cross_attention_dim[-1], num_attention_heads=num_attention_heads[-1], resnet_groups=norm_num_groups, dual_cross_attention=False, use_linear_projection=use_linear_projection, num_layers=mid_block_layers, transformer_layers_per_block=transformer_layers_per_mid_block, ) # count how many layers upsample the images self.num_upsamplers = 0 # up reversed_block_out_channels = list(reversed(block_out_channels)) reversed_num_attention_heads = list(reversed(num_attention_heads)) reversed_layers_per_block = list(reversed(layers_per_block)) reversed_cross_attention_dim = list(reversed(cross_attention_dim)) reversed_motion_num_attention_heads = list(reversed(motion_num_attention_heads)) if reverse_transformer_layers_per_block is None: reverse_transformer_layers_per_block = list(reversed(transformer_layers_per_block)) if reverse_temporal_transformer_layers_per_block is None: reverse_temporal_transformer_layers_per_block = list(reversed(temporal_transformer_layers_per_block)) output_channel = reversed_block_out_channels[0] for i, up_block_type in enumerate(up_block_types): is_final_block = i == len(block_out_channels) - 1 prev_output_channel = output_channel output_channel = reversed_block_out_channels[i] input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)] # add upsample block for all BUT final layer if not is_final_block: add_upsample = True self.num_upsamplers += 1 else: add_upsample = False if up_block_type == "CrossAttnUpBlockMotion": up_block = CrossAttnUpBlockMotion( in_channels=input_channel, out_channels=output_channel, prev_output_channel=prev_output_channel, temb_channels=time_embed_dim, resolution_idx=i, num_layers=reversed_layers_per_block[i] + 1, transformer_layers_per_block=reverse_transformer_layers_per_block[i], resnet_eps=norm_eps, resnet_act_fn=act_fn, resnet_groups=norm_num_groups, num_attention_heads=reversed_num_attention_heads[i], cross_attention_dim=reversed_cross_attention_dim[i], add_upsample=add_upsample, use_linear_projection=use_linear_projection, temporal_num_attention_heads=reversed_motion_num_attention_heads[i], temporal_max_seq_length=motion_max_seq_length, temporal_transformer_layers_per_block=reverse_temporal_transformer_layers_per_block[i], ) elif up_block_type == "UpBlockMotion": up_block = UpBlockMotion( in_channels=input_channel, 
prev_output_channel=prev_output_channel, out_channels=output_channel, temb_channels=time_embed_dim, resolution_idx=i, num_layers=reversed_layers_per_block[i] + 1, resnet_eps=norm_eps, resnet_act_fn=act_fn, resnet_groups=norm_num_groups, add_upsample=add_upsample, temporal_num_attention_heads=reversed_motion_num_attention_heads[i], temporal_max_seq_length=motion_max_seq_length, temporal_transformer_layers_per_block=reverse_temporal_transformer_layers_per_block[i], ) else: raise ValueError( "Invalid `up_block_type` encountered. Must be one of `CrossAttnUpBlockMotion` or `UpBlockMotion`" ) self.up_blocks.append(up_block) prev_output_channel = output_channel # out if norm_num_groups is not None: self.conv_norm_out = nn.GroupNorm( num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps ) self.conv_act = nn.SiLU() else: self.conv_norm_out = None self.conv_act = None conv_out_padding = (conv_out_kernel - 1) // 2 self.conv_out = nn.Conv2d( block_out_channels[0], out_channels, kernel_size=conv_out_kernel, padding=conv_out_padding ) @classmethod def from_unet2d( cls, unet: UNet2DConditionModel, motion_adapter: Optional[MotionAdapter] = None, load_weights: bool = True, ): has_motion_adapter = motion_adapter is not None if has_motion_adapter: motion_adapter.to(device=unet.device) # check compatibility of number of blocks if len(unet.config["down_block_types"]) != len(motion_adapter.config["block_out_channels"]): raise ValueError("Incompatible Motion Adapter, got different number of blocks") # check layers compatibility for each block if isinstance(unet.config["layers_per_block"], int): expanded_layers_per_block = [unet.config["layers_per_block"]] * len(unet.config["down_block_types"]) else: expanded_layers_per_block = list(unet.config["layers_per_block"]) if isinstance(motion_adapter.config["motion_layers_per_block"], int): expanded_adapter_layers_per_block = [motion_adapter.config["motion_layers_per_block"]] * len( motion_adapter.config["block_out_channels"] ) else: expanded_adapter_layers_per_block = list(motion_adapter.config["motion_layers_per_block"]) if expanded_layers_per_block != expanded_adapter_layers_per_block: raise ValueError("Incompatible Motion Adapter, got different number of layers per block") # based on https://github.com/guoyww/AnimateDiff/blob/895f3220c06318ea0760131ec70408b466c49333/animatediff/models/unet.py#L459 config = dict(unet.config) config["_class_name"] = cls.__name__ down_blocks = [] for down_blocks_type in config["down_block_types"]: if "CrossAttn" in down_blocks_type: down_blocks.append("CrossAttnDownBlockMotion") else: down_blocks.append("DownBlockMotion") config["down_block_types"] = down_blocks up_blocks = [] for down_blocks_type in config["up_block_types"]: if "CrossAttn" in down_blocks_type: up_blocks.append("CrossAttnUpBlockMotion") else: up_blocks.append("UpBlockMotion") config["up_block_types"] = up_blocks if has_motion_adapter: config["motion_num_attention_heads"] = motion_adapter.config["motion_num_attention_heads"] config["motion_max_seq_length"] = motion_adapter.config["motion_max_seq_length"] config["use_motion_mid_block"] = motion_adapter.config["use_motion_mid_block"] config["layers_per_block"] = motion_adapter.config["motion_layers_per_block"] config["temporal_transformer_layers_per_mid_block"] = motion_adapter.config[ "motion_transformer_layers_per_mid_block" ] config["temporal_transformer_layers_per_block"] = motion_adapter.config[ "motion_transformer_layers_per_block" ] config["motion_num_attention_heads"] = 
motion_adapter.config["motion_num_attention_heads"] # For PIA UNets we need to set the number input channels to 9 if motion_adapter.config["conv_in_channels"]: config["in_channels"] = motion_adapter.config["conv_in_channels"] # Need this for backwards compatibility with UNet2DConditionModel checkpoints if not config.get("num_attention_heads"): config["num_attention_heads"] = config["attention_head_dim"] expected_kwargs, optional_kwargs = cls._get_signature_keys(cls) config = FrozenDict({k: config.get(k) for k in config if k in expected_kwargs or k in optional_kwargs}) config["_class_name"] = cls.__name__ model = cls.from_config(config) if not load_weights: return model # Logic for loading PIA UNets which allow the first 4 channels to be any UNet2DConditionModel conv_in weight # while the last 5 channels must be PIA conv_in weights. if has_motion_adapter and motion_adapter.config["conv_in_channels"]: model.conv_in = motion_adapter.conv_in updated_conv_in_weight = torch.cat( [unet.conv_in.weight, motion_adapter.conv_in.weight[:, 4:, :, :]], dim=1 ) model.conv_in.load_state_dict({"weight": updated_conv_in_weight, "bias": unet.conv_in.bias}) else: model.conv_in.load_state_dict(unet.conv_in.state_dict()) model.time_proj.load_state_dict(unet.time_proj.state_dict()) model.time_embedding.load_state_dict(unet.time_embedding.state_dict()) if any( isinstance(proc, (IPAdapterAttnProcessor, IPAdapterAttnProcessor2_0)) for proc in unet.attn_processors.values() ): attn_procs = {} for name, processor in unet.attn_processors.items(): if name.endswith("attn1.processor"): attn_processor_class = ( AttnProcessor2_0 if hasattr(F, "scaled_dot_product_attention") else AttnProcessor ) attn_procs[name] = attn_processor_class() else: attn_processor_class = ( IPAdapterAttnProcessor2_0 if hasattr(F, "scaled_dot_product_attention") else IPAdapterAttnProcessor ) attn_procs[name] = attn_processor_class( hidden_size=processor.hidden_size, cross_attention_dim=processor.cross_attention_dim, scale=processor.scale, num_tokens=processor.num_tokens, ) for name, processor in model.attn_processors.items(): if name not in attn_procs: attn_procs[name] = processor.__class__() model.set_attn_processor(attn_procs) model.config.encoder_hid_dim_type = "ip_image_proj" model.encoder_hid_proj = unet.encoder_hid_proj for i, down_block in enumerate(unet.down_blocks): model.down_blocks[i].resnets.load_state_dict(down_block.resnets.state_dict()) if hasattr(model.down_blocks[i], "attentions"): model.down_blocks[i].attentions.load_state_dict(down_block.attentions.state_dict()) if model.down_blocks[i].downsamplers: model.down_blocks[i].downsamplers.load_state_dict(down_block.downsamplers.state_dict()) for i, up_block in enumerate(unet.up_blocks): model.up_blocks[i].resnets.load_state_dict(up_block.resnets.state_dict()) if hasattr(model.up_blocks[i], "attentions"): model.up_blocks[i].attentions.load_state_dict(up_block.attentions.state_dict()) if model.up_blocks[i].upsamplers: model.up_blocks[i].upsamplers.load_state_dict(up_block.upsamplers.state_dict()) model.mid_block.resnets.load_state_dict(unet.mid_block.resnets.state_dict()) model.mid_block.attentions.load_state_dict(unet.mid_block.attentions.state_dict()) if unet.conv_norm_out is not None: model.conv_norm_out.load_state_dict(unet.conv_norm_out.state_dict()) if unet.conv_act is not None: model.conv_act.load_state_dict(unet.conv_act.state_dict()) model.conv_out.load_state_dict(unet.conv_out.state_dict()) if has_motion_adapter: model.load_motion_modules(motion_adapter) # ensure that the Motion 
UNet is the same dtype as the UNet2DConditionModel model.to(unet.dtype) return model def freeze_unet2d_params(self) -> None: """Freeze the weights of just the UNet2DConditionModel, and leave the motion modules unfrozen for fine tuning. """ # Freeze everything for param in self.parameters(): param.requires_grad = False # Unfreeze Motion Modules for down_block in self.down_blocks: motion_modules = down_block.motion_modules for param in motion_modules.parameters(): param.requires_grad = True for up_block in self.up_blocks: motion_modules = up_block.motion_modules for param in motion_modules.parameters(): param.requires_grad = True if hasattr(self.mid_block, "motion_modules"): motion_modules = self.mid_block.motion_modules for param in motion_modules.parameters(): param.requires_grad = True def load_motion_modules(self, motion_adapter: Optional[MotionAdapter]) -> None: for i, down_block in enumerate(motion_adapter.down_blocks): self.down_blocks[i].motion_modules.load_state_dict(down_block.motion_modules.state_dict()) for i, up_block in enumerate(motion_adapter.up_blocks): self.up_blocks[i].motion_modules.load_state_dict(up_block.motion_modules.state_dict()) # to support older motion modules that don't have a mid_block if hasattr(self.mid_block, "motion_modules"): self.mid_block.motion_modules.load_state_dict(motion_adapter.mid_block.motion_modules.state_dict()) def save_motion_modules( self, save_directory: str, is_main_process: bool = True, safe_serialization: bool = True, variant: Optional[str] = None, push_to_hub: bool = False, **kwargs, ) -> None: state_dict = self.state_dict() # Extract all motion modules motion_state_dict = {} for k, v in state_dict.items(): if "motion_modules" in k: motion_state_dict[k] = v adapter = MotionAdapter( block_out_channels=self.config["block_out_channels"], motion_layers_per_block=self.config["layers_per_block"], motion_norm_num_groups=self.config["norm_num_groups"], motion_num_attention_heads=self.config["motion_num_attention_heads"], motion_max_seq_length=self.config["motion_max_seq_length"], use_motion_mid_block=self.config["use_motion_mid_block"], ) adapter.load_state_dict(motion_state_dict) adapter.save_pretrained( save_directory=save_directory, is_main_process=is_main_process, safe_serialization=safe_serialization, variant=variant, push_to_hub=push_to_hub, **kwargs, ) @property # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.attn_processors def attn_processors(self) -> Dict[str, AttentionProcessor]: r""" Returns: `dict` of attention processors: A dictionary containing all attention processors used in the model with indexed by its weight name. """ # set recursively processors = {} def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]): if hasattr(module, "get_processor"): processors[f"{name}.processor"] = module.get_processor() for sub_name, child in module.named_children(): fn_recursive_add_processors(f"{name}.{sub_name}", child, processors) return processors for name, module in self.named_children(): fn_recursive_add_processors(name, module, processors) return processors # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attn_processor def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]): r""" Sets the attention processor to use to compute attention. 
Parameters: processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`): The instantiated processor class or a dictionary of processor classes that will be set as the processor for **all** `Attention` layers. If `processor` is a dict, the key needs to define the path to the corresponding cross attention processor. This is strongly recommended when setting trainable attention processors. """ count = len(self.attn_processors.keys()) if isinstance(processor, dict) and len(processor) != count: raise ValueError( f"A dict of processors was passed, but the number of processors {len(processor)} does not match the" f" number of attention layers: {count}. Please make sure to pass {count} processor classes." ) def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor): if hasattr(module, "set_processor"): if not isinstance(processor, dict): module.set_processor(processor) else: module.set_processor(processor.pop(f"{name}.processor")) for sub_name, child in module.named_children(): fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor) for name, module in self.named_children(): fn_recursive_attn_processor(name, module, processor) def enable_forward_chunking(self, chunk_size: Optional[int] = None, dim: int = 0) -> None: """ Sets the attention processor to use [feed forward chunking](https://huggingface.co/blog/reformer#2-chunked-feed-forward-layers). Parameters: chunk_size (`int`, *optional*): The chunk size of the feed-forward layers. If not specified, will run feed-forward layer individually over each tensor of dim=`dim`. dim (`int`, *optional*, defaults to `0`): The dimension over which the feed-forward computation should be chunked. Choose between dim=0 (batch) or dim=1 (sequence length). """ if dim not in [0, 1]: raise ValueError(f"Make sure to set `dim` to either 0 or 1, not {dim}") # By default chunk size is 1 chunk_size = chunk_size or 1 def fn_recursive_feed_forward(module: torch.nn.Module, chunk_size: int, dim: int): if hasattr(module, "set_chunk_feed_forward"): module.set_chunk_feed_forward(chunk_size=chunk_size, dim=dim) for child in module.children(): fn_recursive_feed_forward(child, chunk_size, dim) for module in self.children(): fn_recursive_feed_forward(module, chunk_size, dim) def disable_forward_chunking(self) -> None: def fn_recursive_feed_forward(module: torch.nn.Module, chunk_size: int, dim: int): if hasattr(module, "set_chunk_feed_forward"): module.set_chunk_feed_forward(chunk_size=chunk_size, dim=dim) for child in module.children(): fn_recursive_feed_forward(child, chunk_size, dim) for module in self.children(): fn_recursive_feed_forward(module, None, 0) # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_default_attn_processor def set_default_attn_processor(self) -> None: """ Disables custom attention processors and sets the default attention implementation. 
""" if all(proc.__class__ in ADDED_KV_ATTENTION_PROCESSORS for proc in self.attn_processors.values()): processor = AttnAddedKVProcessor() elif all(proc.__class__ in CROSS_ATTENTION_PROCESSORS for proc in self.attn_processors.values()): processor = AttnProcessor() else: raise ValueError( f"Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}" ) self.set_attn_processor(processor) def _set_gradient_checkpointing(self, module, value: bool = False) -> None: if isinstance(module, (CrossAttnDownBlockMotion, DownBlockMotion, CrossAttnUpBlockMotion, UpBlockMotion)): module.gradient_checkpointing = value # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.enable_freeu def enable_freeu(self, s1: float, s2: float, b1: float, b2: float) -> None: r"""Enables the FreeU mechanism from https://arxiv.org/abs/2309.11497. The suffixes after the scaling factors represent the stage blocks where they are being applied. Please refer to the [official repository](https://github.com/ChenyangSi/FreeU) for combinations of values that are known to work well for different pipelines such as Stable Diffusion v1, v2, and Stable Diffusion XL. Args: s1 (`float`): Scaling factor for stage 1 to attenuate the contributions of the skip features. This is done to mitigate the "oversmoothing effect" in the enhanced denoising process. s2 (`float`): Scaling factor for stage 2 to attenuate the contributions of the skip features. This is done to mitigate the "oversmoothing effect" in the enhanced denoising process. b1 (`float`): Scaling factor for stage 1 to amplify the contributions of backbone features. b2 (`float`): Scaling factor for stage 2 to amplify the contributions of backbone features. """ for i, upsample_block in enumerate(self.up_blocks): setattr(upsample_block, "s1", s1) setattr(upsample_block, "s2", s2) setattr(upsample_block, "b1", b1) setattr(upsample_block, "b2", b2) # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.disable_freeu def disable_freeu(self) -> None: """Disables the FreeU mechanism.""" freeu_keys = {"s1", "s2", "b1", "b2"} for i, upsample_block in enumerate(self.up_blocks): for k in freeu_keys: if hasattr(upsample_block, k) or getattr(upsample_block, k, None) is not None: setattr(upsample_block, k, None) # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.fuse_qkv_projections def fuse_qkv_projections(self): """ Enables fused QKV projections. For self-attention modules, all projection matrices (i.e., query, key, value) are fused. For cross-attention modules, key and value projection matrices are fused. <Tip warning={true}> This API is 🧪 experimental. </Tip> """ self.original_attn_processors = None for _, attn_processor in self.attn_processors.items(): if "Added" in str(attn_processor.__class__.__name__): raise ValueError("`fuse_qkv_projections()` is not supported for models having added KV projections.") self.original_attn_processors = self.attn_processors for module in self.modules(): if isinstance(module, Attention): module.fuse_projections(fuse=True) self.set_attn_processor(FusedAttnProcessor2_0()) # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.unfuse_qkv_projections def unfuse_qkv_projections(self): """Disables the fused QKV projection if enabled. <Tip warning={true}> This API is 🧪 experimental. 
</Tip> """ if self.original_attn_processors is not None: self.set_attn_processor(self.original_attn_processors) def forward( self, sample: torch.Tensor, timestep: Union[torch.Tensor, float, int], encoder_hidden_states: torch.Tensor, timestep_cond: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, cross_attention_kwargs: Optional[Dict[str, Any]] = None, added_cond_kwargs: Optional[Dict[str, torch.Tensor]] = None, down_block_additional_residuals: Optional[Tuple[torch.Tensor]] = None, mid_block_additional_residual: Optional[torch.Tensor] = None, return_dict: bool = True, ) -> Union[UNetMotionOutput, Tuple[torch.Tensor]]: r""" The [`UNetMotionModel`] forward method. Args: sample (`torch.Tensor`): The noisy input tensor with the following shape `(batch, num_frames, channel, height, width`. timestep (`torch.Tensor` or `float` or `int`): The number of timesteps to denoise an input. encoder_hidden_states (`torch.Tensor`): The encoder hidden states with shape `(batch, sequence_length, feature_dim)`. timestep_cond: (`torch.Tensor`, *optional*, defaults to `None`): Conditional embeddings for timestep. If provided, the embeddings will be summed with the samples passed through the `self.time_embedding` layer to obtain the timestep embeddings. attention_mask (`torch.Tensor`, *optional*, defaults to `None`): An attention mask of shape `(batch, key_tokens)` is applied to `encoder_hidden_states`. If `1` the mask is kept, otherwise if `0` it is discarded. Mask will be converted into a bias, which adds large negative values to the attention scores corresponding to "discard" tokens. cross_attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under `self.processor` in [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). down_block_additional_residuals: (`tuple` of `torch.Tensor`, *optional*): A tuple of tensors that if specified are added to the residuals of down unet blocks. mid_block_additional_residual: (`torch.Tensor`, *optional*): A tensor that if specified is added to the residual of the middle unet block. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~models.unets.unet_motion_model.UNetMotionOutput`] instead of a plain tuple. Returns: [`~models.unets.unet_motion_model.UNetMotionOutput`] or `tuple`: If `return_dict` is True, an [`~models.unets.unet_motion_model.UNetMotionOutput`] is returned, otherwise a `tuple` is returned where the first element is the sample tensor. """ # By default samples have to be AT least a multiple of the overall upsampling factor. # The overall upsampling factor is equal to 2 ** (# num of upsampling layears). # However, the upsampling interpolation output size can be forced to fit any upsampling size # on the fly if necessary. default_overall_up_factor = 2**self.num_upsamplers # upsample size should be forwarded when sample is not a multiple of `default_overall_up_factor` forward_upsample_size = False upsample_size = None if any(s % default_overall_up_factor != 0 for s in sample.shape[-2:]): logger.info("Forward upsample size to force interpolation output size.") forward_upsample_size = True # prepare attention_mask if attention_mask is not None: attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0 attention_mask = attention_mask.unsqueeze(1) # 1. 
time timesteps = timestep if not torch.is_tensor(timesteps): # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can # This would be a good case for the `match` statement (Python 3.10+) is_mps = sample.device.type == "mps" if isinstance(timestep, float): dtype = torch.float32 if is_mps else torch.float64 else: dtype = torch.int32 if is_mps else torch.int64 timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device) elif len(timesteps.shape) == 0: timesteps = timesteps[None].to(sample.device) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML num_frames = sample.shape[2] timesteps = timesteps.expand(sample.shape[0]) t_emb = self.time_proj(timesteps) # timesteps does not contain any weights and will always return f32 tensors # but time_embedding might actually be running in fp16. so we need to cast here. # there might be better ways to encapsulate this. t_emb = t_emb.to(dtype=self.dtype) emb = self.time_embedding(t_emb, timestep_cond) aug_emb = None if self.config.addition_embed_type == "text_time": if "text_embeds" not in added_cond_kwargs: raise ValueError( f"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `text_embeds` to be passed in `added_cond_kwargs`" ) text_embeds = added_cond_kwargs.get("text_embeds") if "time_ids" not in added_cond_kwargs: raise ValueError( f"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `time_ids` to be passed in `added_cond_kwargs`" ) time_ids = added_cond_kwargs.get("time_ids") time_embeds = self.add_time_proj(time_ids.flatten()) time_embeds = time_embeds.reshape((text_embeds.shape[0], -1)) add_embeds = torch.concat([text_embeds, time_embeds], dim=-1) add_embeds = add_embeds.to(emb.dtype) aug_emb = self.add_embedding(add_embeds) emb = emb if aug_emb is None else emb + aug_emb emb = emb.repeat_interleave(repeats=num_frames, dim=0) if self.encoder_hid_proj is not None and self.config.encoder_hid_dim_type == "ip_image_proj": if "image_embeds" not in added_cond_kwargs: raise ValueError( f"{self.__class__} has the config param `encoder_hid_dim_type` set to 'ip_image_proj' which requires the keyword argument `image_embeds` to be passed in `added_conditions`" ) image_embeds = added_cond_kwargs.get("image_embeds") image_embeds = self.encoder_hid_proj(image_embeds) image_embeds = [image_embed.repeat_interleave(repeats=num_frames, dim=0) for image_embed in image_embeds] encoder_hidden_states = (encoder_hidden_states, image_embeds) # 2. pre-process sample = sample.permute(0, 2, 1, 3, 4).reshape((sample.shape[0] * num_frames, -1) + sample.shape[3:]) sample = self.conv_in(sample) # 3. 
down down_block_res_samples = (sample,) for downsample_block in self.down_blocks: if hasattr(downsample_block, "has_cross_attention") and downsample_block.has_cross_attention: sample, res_samples = downsample_block( hidden_states=sample, temb=emb, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask, num_frames=num_frames, cross_attention_kwargs=cross_attention_kwargs, ) else: sample, res_samples = downsample_block(hidden_states=sample, temb=emb, num_frames=num_frames) down_block_res_samples += res_samples if down_block_additional_residuals is not None: new_down_block_res_samples = () for down_block_res_sample, down_block_additional_residual in zip( down_block_res_samples, down_block_additional_residuals ): down_block_res_sample = down_block_res_sample + down_block_additional_residual new_down_block_res_samples += (down_block_res_sample,) down_block_res_samples = new_down_block_res_samples # 4. mid if self.mid_block is not None: # To support older versions of motion modules that don't have a mid_block if hasattr(self.mid_block, "motion_modules"): sample = self.mid_block( sample, emb, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask, num_frames=num_frames, cross_attention_kwargs=cross_attention_kwargs, ) else: sample = self.mid_block( sample, emb, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask, cross_attention_kwargs=cross_attention_kwargs, ) if mid_block_additional_residual is not None: sample = sample + mid_block_additional_residual # 5. up for i, upsample_block in enumerate(self.up_blocks): is_final_block = i == len(self.up_blocks) - 1 res_samples = down_block_res_samples[-len(upsample_block.resnets) :] down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)] # if we have not reached the final block and need to forward the # upsample size, we do it here if not is_final_block and forward_upsample_size: upsample_size = down_block_res_samples[-1].shape[2:] if hasattr(upsample_block, "has_cross_attention") and upsample_block.has_cross_attention: sample = upsample_block( hidden_states=sample, temb=emb, res_hidden_states_tuple=res_samples, encoder_hidden_states=encoder_hidden_states, upsample_size=upsample_size, attention_mask=attention_mask, num_frames=num_frames, cross_attention_kwargs=cross_attention_kwargs, ) else: sample = upsample_block( hidden_states=sample, temb=emb, res_hidden_states_tuple=res_samples, upsample_size=upsample_size, num_frames=num_frames, ) # 6. post-process if self.conv_norm_out: sample = self.conv_norm_out(sample) sample = self.conv_act(sample) sample = self.conv_out(sample) # reshape to (batch, channel, framerate, width, height) sample = sample[None, :].reshape((-1, num_frames) + sample.shape[1:]).permute(0, 2, 1, 3, 4) if not return_dict: return (sample,) return UNetMotionOutput(sample=sample)
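# A minimal sketch (not from the original file) of the shape bookkeeping in UNetMotionModel.forward:
# the frame axis is folded into the batch axis so the pretrained spatial layers see ordinary 4-D
# activations, and only the motion modules operate over the frame dimension. Toy shapes, no weights,
# mirroring the permute/reshape calls in forward:
import torch

batch, channels, num_frames, height, width = 1, 4, 16, 32, 32
sample = torch.randn(batch, channels, num_frames, height, width)

# fold frames into the batch dimension before the spatial blocks run
folded = sample.permute(0, 2, 1, 3, 4).reshape(batch * num_frames, channels, height, width)

# unfold again at the end of forward, restoring (batch, channels, num_frames, height, width)
unfolded = folded[None, :].reshape(batch, num_frames, channels, height, width).permute(0, 2, 1, 3, 4)
assert torch.equal(sample, unfolded)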
diffusers/src/diffusers/models/unets/unet_motion_model.py/0
{ "file_path": "diffusers/src/diffusers/models/unets/unet_motion_model.py", "repo_id": "diffusers", "token_count": 50330 }
140
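# A sketch of the workflow the classmethods in unet_motion_model.py above are designed for: inflate a
# pretrained 2-D UNet into a UNetMotionModel with `from_unet2d`, freeze the spatial weights, fine-tune
# only the motion modules, then export them back as a MotionAdapter. The checkpoint names below
# ("runwayml/stable-diffusion-v1-5", "guoyww/animatediff-motion-adapter-v1-5-2") are assumptions for
# illustration only, not taken from this file:
from diffusers import MotionAdapter, UNet2DConditionModel, UNetMotionModel

unet2d = UNet2DConditionModel.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder="unet")
adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-2")

# spatial weights are copied from `unet2d`, temporal weights are loaded from the adapter
motion_unet = UNetMotionModel.from_unet2d(unet2d, motion_adapter=adapter)

# keep the 2-D weights frozen and train only the motion modules
motion_unet.freeze_unet2d_params()
trainable = sum(p.numel() for p in motion_unet.parameters() if p.requires_grad)
print(f"trainable parameters: {trainable:,}")

# after training, write the temporal weights out as a standalone adapter
motion_unet.save_motion_modules("./my-motion-adapter")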
# Copyright 2024 Salesforce.com, inc. # Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Optional, Tuple, Union import torch from torch import nn from transformers import CLIPPreTrainedModel from transformers.modeling_outputs import BaseModelOutputWithPooling from transformers.models.clip.configuration_clip import CLIPTextConfig from transformers.models.clip.modeling_clip import CLIPEncoder def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None): """ Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`. """ bsz, src_len = mask.size() tgt_len = tgt_len if tgt_len is not None else src_len expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype) inverted_mask = 1.0 - expanded_mask return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min) # This is a modified version of the CLIPTextModel from transformers.models.clip.modeling_clip # Which allows for an extra input of "context embeddings", which are the query embeddings used in Qformer # They pass through the clip model, along with the text embeddings, and interact with them using self attention class ContextCLIPTextModel(CLIPPreTrainedModel): config_class = CLIPTextConfig _no_split_modules = ["CLIPEncoderLayer"] def __init__(self, config: CLIPTextConfig): super().__init__(config) self.text_model = ContextCLIPTextTransformer(config) # Initialize weights and apply final processing self.post_init() def forward( self, ctx_embeddings: torch.Tensor = None, ctx_begin_pos: list = None, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutputWithPooling]: return self.text_model( ctx_embeddings=ctx_embeddings, ctx_begin_pos=ctx_begin_pos, input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) class ContextCLIPTextTransformer(nn.Module): def __init__(self, config: CLIPTextConfig): super().__init__() self.config = config embed_dim = config.hidden_size self.embeddings = ContextCLIPTextEmbeddings(config) self.encoder = CLIPEncoder(config) self.final_layer_norm = nn.LayerNorm(embed_dim) def forward( self, ctx_embeddings: torch.Tensor, ctx_begin_pos: list, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutputWithPooling]: r""" Returns: """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if 
output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is None: raise ValueError("You have to specify either input_ids") input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) hidden_states = self.embeddings( input_ids=input_ids, position_ids=position_ids, ctx_embeddings=ctx_embeddings, ctx_begin_pos=ctx_begin_pos, ) bsz, seq_len = input_shape if ctx_embeddings is not None: seq_len += ctx_embeddings.size(1) # CLIP's text model uses causal mask, prepare it here. # https://github.com/openai/CLIP/blob/cfcffb90e69f37bf2ff1e988237a0fbe41f33c04/clip/model.py#L324 causal_attention_mask = self._build_causal_attention_mask(bsz, seq_len, hidden_states.dtype).to( hidden_states.device ) # expand attention_mask if attention_mask is not None: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] attention_mask = _expand_mask(attention_mask, hidden_states.dtype) encoder_outputs = self.encoder( inputs_embeds=hidden_states, attention_mask=attention_mask, causal_attention_mask=causal_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) last_hidden_state = encoder_outputs[0] last_hidden_state = self.final_layer_norm(last_hidden_state) # text_embeds.shape = [batch_size, sequence_length, transformer.width] # take features from the eot embedding (eot_token is the highest number in each sequence) # casting to torch.int for onnx compatibility: argmax doesn't support int64 inputs with opset 14 pooled_output = last_hidden_state[ torch.arange(last_hidden_state.shape[0], device=input_ids.device), input_ids.to(torch.int).argmax(dim=-1), ] if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPooling( last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) def _build_causal_attention_mask(self, bsz, seq_len, dtype): # lazily create causal attention mask, with full attention between the vision tokens # pytorch uses additive attention mask; fill with -inf mask = torch.empty(bsz, seq_len, seq_len, dtype=dtype) mask.fill_(torch.tensor(torch.finfo(dtype).min)) mask.triu_(1) # zero out the lower diagonal mask = mask.unsqueeze(1) # expand mask return mask class ContextCLIPTextEmbeddings(nn.Module): def __init__(self, config: CLIPTextConfig): super().__init__() embed_dim = config.hidden_size self.token_embedding = nn.Embedding(config.vocab_size, embed_dim) self.position_embedding = nn.Embedding(config.max_position_embeddings, embed_dim) # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1))) def forward( self, ctx_embeddings: torch.Tensor, ctx_begin_pos: list, input_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.Tensor] = None, ) -> torch.Tensor: if ctx_embeddings is None: ctx_len = 0 else: ctx_len = ctx_embeddings.shape[1] seq_length = (input_ids.shape[-1] if input_ids is not None else inputs_embeds.shape[-2]) + ctx_len if position_ids is None: position_ids = self.position_ids[:, :seq_length] if inputs_embeds is None: inputs_embeds = self.token_embedding(input_ids) # for each input embeddings, add the ctx embeddings at the correct position input_embeds_ctx = [] bsz = 
inputs_embeds.shape[0] if ctx_embeddings is not None: for i in range(bsz): cbp = ctx_begin_pos[i] prefix = inputs_embeds[i, :cbp] # remove the special token embedding suffix = inputs_embeds[i, cbp:] input_embeds_ctx.append(torch.cat([prefix, ctx_embeddings[i], suffix], dim=0)) inputs_embeds = torch.stack(input_embeds_ctx, dim=0) position_embeddings = self.position_embedding(position_ids) embeddings = inputs_embeds + position_embeddings return embeddings
diffusers/src/diffusers/pipelines/blip_diffusion/modeling_ctx_clip.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/blip_diffusion/modeling_ctx_clip.py", "repo_id": "diffusers", "token_count": 3808 }
141
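# The core trick in modeling_ctx_clip.py above is that ContextCLIPTextEmbeddings splices the Q-Former
# "context" embeddings into the CLIP token-embedding sequence at `ctx_begin_pos` before the causal
# encoder runs (the causal mask is then built for the longer sequence). A self-contained toy sketch of
# just that splice; shapes and insertion positions are made up for illustration:
import torch

bsz, seq_len, ctx_len, dim = 2, 5, 3, 8
inputs_embeds = torch.randn(bsz, seq_len, dim)   # token embeddings from nn.Embedding
ctx_embeddings = torch.randn(bsz, ctx_len, dim)  # query embeddings produced by the Q-Former
ctx_begin_pos = [1, 2]                           # per-sample insertion points (illustrative values)

spliced = []
for i in range(bsz):
    cbp = ctx_begin_pos[i]
    spliced.append(torch.cat([inputs_embeds[i, :cbp], ctx_embeddings[i], inputs_embeds[i, cbp:]], dim=0))

inputs_embeds = torch.stack(spliced, dim=0)
print(inputs_embeds.shape)  # torch.Size([2, 8, 8]): the sequence length grows by ctx_len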
from typing import TYPE_CHECKING from ...utils import ( DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, get_objects_from_module, is_torch_available, is_transformers_available, ) _dummy_objects = {} _import_structure = {} try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils import dummy_torch_and_transformers_objects # noqa F403 _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: _import_structure["pipeline_hunyuandit_controlnet"] = ["HunyuanDiTControlNetPipeline"] if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * else: from .pipeline_hunyuandit_controlnet import HunyuanDiTControlNetPipeline else: import sys sys.modules[__name__] = _LazyModule( __name__, globals()["__file__"], _import_structure, module_spec=__spec__, ) for name, value in _dummy_objects.items(): setattr(sys.modules[__name__], name, value)
diffusers/src/diffusers/pipelines/controlnet_hunyuandit/__init__.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/controlnet_hunyuandit/__init__.py", "repo_id": "diffusers", "token_count": 530 }
142
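# The __init__.py above follows the diffusers lazy-import convention: it only records importable names
# in `_import_structure` and installs a `_LazyModule` in `sys.modules`, so the heavy pipeline module is
# imported on first attribute access (or swapped for dummy objects when torch/transformers are missing).
# A sketch of the effect from the caller's side, assuming the pipeline is also re-exported from the
# top-level `diffusers` namespace:
import diffusers

# `import diffusers` does not load the pipeline module; this attribute access triggers the lazy import.
pipeline_cls = diffusers.HunyuanDiTControlNetPipeline
print(pipeline_cls.__module__)  # ...pipelines.controlnet_hunyuandit.pipeline_hunyuandit_controlnet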
from typing import TYPE_CHECKING from ....utils import DIFFUSERS_SLOW_IMPORT, _LazyModule _import_structure = { "mel": ["Mel"], "pipeline_audio_diffusion": ["AudioDiffusionPipeline"], } if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: from .mel import Mel from .pipeline_audio_diffusion import AudioDiffusionPipeline else: import sys sys.modules[__name__] = _LazyModule( __name__, globals()["__file__"], _import_structure, module_spec=__spec__, )
diffusers/src/diffusers/pipelines/deprecated/audio_diffusion/__init__.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/deprecated/audio_diffusion/__init__.py", "repo_id": "diffusers", "token_count": 212 }
143
from typing import TYPE_CHECKING from ....utils import ( DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, get_objects_from_module, is_torch_available, is_transformers_available, ) _dummy_objects = {} _import_structure = {} try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ....utils import dummy_torch_and_transformers_objects _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: _import_structure["pipeline_cycle_diffusion"] = ["CycleDiffusionPipeline"] _import_structure["pipeline_stable_diffusion_inpaint_legacy"] = ["StableDiffusionInpaintPipelineLegacy"] _import_structure["pipeline_stable_diffusion_model_editing"] = ["StableDiffusionModelEditingPipeline"] _import_structure["pipeline_stable_diffusion_paradigms"] = ["StableDiffusionParadigmsPipeline"] _import_structure["pipeline_stable_diffusion_pix2pix_zero"] = ["StableDiffusionPix2PixZeroPipeline"] if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ....utils.dummy_torch_and_transformers_objects import * else: from .pipeline_cycle_diffusion import CycleDiffusionPipeline from .pipeline_stable_diffusion_inpaint_legacy import StableDiffusionInpaintPipelineLegacy from .pipeline_stable_diffusion_model_editing import StableDiffusionModelEditingPipeline from .pipeline_stable_diffusion_paradigms import StableDiffusionParadigmsPipeline from .pipeline_stable_diffusion_pix2pix_zero import StableDiffusionPix2PixZeroPipeline else: import sys sys.modules[__name__] = _LazyModule( __name__, globals()["__file__"], _import_structure, module_spec=__spec__, ) for name, value in _dummy_objects.items(): setattr(sys.modules[__name__], name, value)
diffusers/src/diffusers/pipelines/deprecated/stable_diffusion_variants/__init__.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/deprecated/stable_diffusion_variants/__init__.py", "repo_id": "diffusers", "token_count": 817 }
144
# Copyright 2024 Microsoft and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Callable, List, Optional, Tuple, Union import torch from transformers import CLIPTextModel, CLIPTokenizer from ....configuration_utils import ConfigMixin, register_to_config from ....models import ModelMixin, Transformer2DModel, VQModel from ....schedulers import VQDiffusionScheduler from ....utils import logging from ...pipeline_utils import DiffusionPipeline, ImagePipelineOutput logger = logging.get_logger(__name__) # pylint: disable=invalid-name class LearnedClassifierFreeSamplingEmbeddings(ModelMixin, ConfigMixin): """ Utility class for storing learned text embeddings for classifier free sampling """ @register_to_config def __init__(self, learnable: bool, hidden_size: Optional[int] = None, length: Optional[int] = None): super().__init__() self.learnable = learnable if self.learnable: assert hidden_size is not None, "learnable=True requires `hidden_size` to be set" assert length is not None, "learnable=True requires `length` to be set" embeddings = torch.zeros(length, hidden_size) else: embeddings = None self.embeddings = torch.nn.Parameter(embeddings) class VQDiffusionPipeline(DiffusionPipeline): r""" Pipeline for text-to-image generation using VQ Diffusion. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.). Args: vqvae ([`VQModel`]): Vector Quantized Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. text_encoder ([`~transformers.CLIPTextModel`]): Frozen text-encoder ([clip-vit-base-patch32](https://huggingface.co/openai/clip-vit-base-patch32)). tokenizer ([`~transformers.CLIPTokenizer`]): A `CLIPTokenizer` to tokenize text. transformer ([`Transformer2DModel`]): A conditional `Transformer2DModel` to denoise the encoded image latents. scheduler ([`VQDiffusionScheduler`]): A scheduler to be used in combination with `transformer` to denoise the encoded image latents. 
""" vqvae: VQModel text_encoder: CLIPTextModel tokenizer: CLIPTokenizer transformer: Transformer2DModel learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings scheduler: VQDiffusionScheduler def __init__( self, vqvae: VQModel, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, transformer: Transformer2DModel, scheduler: VQDiffusionScheduler, learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings, ): super().__init__() self.register_modules( vqvae=vqvae, transformer=transformer, text_encoder=text_encoder, tokenizer=tokenizer, scheduler=scheduler, learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings, ) def _encode_prompt(self, prompt, num_images_per_prompt, do_classifier_free_guidance): batch_size = len(prompt) if isinstance(prompt, list) else 1 # get prompt text embeddings text_inputs = self.tokenizer( prompt, padding="max_length", max_length=self.tokenizer.model_max_length, return_tensors="pt", ) text_input_ids = text_inputs.input_ids if text_input_ids.shape[-1] > self.tokenizer.model_max_length: removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :]) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {self.tokenizer.model_max_length} tokens: {removed_text}" ) text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length] prompt_embeds = self.text_encoder(text_input_ids.to(self.device))[0] # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion. # While CLIP does normalize the pooled output of the text transformer when combining # the image and text embeddings, CLIP does not directly normalize the last hidden state. # # CLIP normalizing the pooled output. # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053 prompt_embeds = prompt_embeds / prompt_embeds.norm(dim=-1, keepdim=True) # duplicate text embeddings for each generation per prompt prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0) if do_classifier_free_guidance: if self.learned_classifier_free_sampling_embeddings.learnable: negative_prompt_embeds = self.learned_classifier_free_sampling_embeddings.embeddings negative_prompt_embeds = negative_prompt_embeds.unsqueeze(0).repeat(batch_size, 1, 1) else: uncond_tokens = [""] * batch_size max_length = text_input_ids.shape[-1] uncond_input = self.tokenizer( uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", ) negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(self.device))[0] # See comment for normalizing text embeddings negative_prompt_embeds = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1, keepdim=True) # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) # For classifier free guidance, we need to do two forward passes. 
# Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) return prompt_embeds @torch.no_grad() def __call__( self, prompt: Union[str, List[str]], num_inference_steps: int = 100, guidance_scale: float = 5.0, truncation_rate: float = 1.0, num_images_per_prompt: int = 1, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.Tensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, callback_steps: int = 1, ) -> Union[ImagePipelineOutput, Tuple]: """ The call function to the pipeline for generation. Args: prompt (`str` or `List[str]`): The prompt or prompts to guide image generation. num_inference_steps (`int`, *optional*, defaults to 100): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. guidance_scale (`float`, *optional*, defaults to 7.5): A higher guidance scale value encourages the model to generate images closely linked to the text `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. truncation_rate (`float`, *optional*, defaults to 1.0 (equivalent to no truncation)): Used to "truncate" the predicted classes for x_0 such that the cumulative probability for a pixel is at most `truncation_rate`. The lowest probabilities that would increase the cumulative probability above `truncation_rate` are set to zero. num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. generator (`torch.Generator`, *optional*): A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.Tensor` of shape (batch), *optional*): Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image generation. Must be valid embedding indices.If not provided, a latents tensor will be generated of completely masked latent pixels. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generated image. Choose between `PIL.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. callback (`Callable`, *optional*): A function that calls every `callback_steps` steps during inference. The function is called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. callback_steps (`int`, *optional*, defaults to 1): The frequency at which the `callback` function is called. If not specified, the callback is called at every step. Returns: [`~pipelines.ImagePipelineOutput`] or `tuple`: If `return_dict` is `True`, [`~pipelines.ImagePipelineOutput`] is returned, otherwise a `tuple` is returned where the first element is a list with the generated images. 
""" if isinstance(prompt, str): batch_size = 1 elif isinstance(prompt, list): batch_size = len(prompt) else: raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") batch_size = batch_size * num_images_per_prompt do_classifier_free_guidance = guidance_scale > 1.0 prompt_embeds = self._encode_prompt(prompt, num_images_per_prompt, do_classifier_free_guidance) if (callback_steps is None) or ( callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) ): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." ) # get the initial completely masked latents unless the user supplied it latents_shape = (batch_size, self.transformer.num_latent_pixels) if latents is None: mask_class = self.transformer.num_vector_embeds - 1 latents = torch.full(latents_shape, mask_class).to(self.device) else: if latents.shape != latents_shape: raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}") if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any(): raise ValueError( "Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0," f" {self.transformer.num_vector_embeds - 1} (inclusive)." ) latents = latents.to(self.device) # set timesteps self.scheduler.set_timesteps(num_inference_steps, device=self.device) timesteps_tensor = self.scheduler.timesteps.to(self.device) sample = latents for i, t in enumerate(self.progress_bar(timesteps_tensor)): # expand the sample if we are doing classifier free guidance latent_model_input = torch.cat([sample] * 2) if do_classifier_free_guidance else sample # predict the un-noised image # model_output == `log_p_x_0` model_output = self.transformer(latent_model_input, encoder_hidden_states=prompt_embeds, timestep=t).sample if do_classifier_free_guidance: model_output_uncond, model_output_text = model_output.chunk(2) model_output = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond) model_output -= torch.logsumexp(model_output, dim=1, keepdim=True) model_output = self.truncate(model_output, truncation_rate) # remove `log(0)`'s (`-inf`s) model_output = model_output.clamp(-70) # compute the previous noisy sample x_t -> x_t-1 sample = self.scheduler.step(model_output, timestep=t, sample=sample, generator=generator).prev_sample # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(i, t, sample) embedding_channels = self.vqvae.config.vq_embed_dim embeddings_shape = (batch_size, self.transformer.height, self.transformer.width, embedding_channels) embeddings = self.vqvae.quantize.get_codebook_entry(sample, shape=embeddings_shape) image = self.vqvae.decode(embeddings, force_not_quantize=True).sample image = (image / 2 + 0.5).clamp(0, 1) image = image.cpu().permute(0, 2, 3, 1).numpy() if output_type == "pil": image = self.numpy_to_pil(image) if not return_dict: return (image,) return ImagePipelineOutput(images=image) def truncate(self, log_p_x_0: torch.Tensor, truncation_rate: float) -> torch.Tensor: """ Truncates `log_p_x_0` such that for each column vector, the total cumulative probability is `truncation_rate` The lowest probabilities that would increase the cumulative probability above `truncation_rate` are set to zero. 
""" sorted_log_p_x_0, indices = torch.sort(log_p_x_0, 1, descending=True) sorted_p_x_0 = torch.exp(sorted_log_p_x_0) keep_mask = sorted_p_x_0.cumsum(dim=1) < truncation_rate # Ensure that at least the largest probability is not zeroed out all_true = torch.full_like(keep_mask[:, 0:1, :], True) keep_mask = torch.cat((all_true, keep_mask), dim=1) keep_mask = keep_mask[:, :-1, :] keep_mask = keep_mask.gather(1, indices.argsort(1)) rv = log_p_x_0.clone() rv[~keep_mask] = -torch.inf # -inf = log(0) return rv
diffusers/src/diffusers/pipelines/deprecated/vq_diffusion/pipeline_vq_diffusion.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/deprecated/vq_diffusion/pipeline_vq_diffusion.py", "repo_id": "diffusers", "token_count": 6456 }
145
import inspect import math from itertools import repeat from typing import Any, Callable, Dict, List, Optional, Tuple, Union import torch import torch.nn.functional as F from packaging import version from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from ...configuration_utils import FrozenDict from ...image_processor import PipelineImageInput, VaeImageProcessor from ...loaders import FromSingleFileMixin, IPAdapterMixin, StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin from ...models import AutoencoderKL, UNet2DConditionModel from ...models.attention_processor import Attention, AttnProcessor from ...models.lora import adjust_lora_scale_text_encoder from ...pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from ...schedulers import DDIMScheduler, DPMSolverMultistepScheduler from ...utils import ( USE_PEFT_BACKEND, deprecate, logging, replace_example_docstring, scale_lora_layers, unscale_lora_layers, ) from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline from .pipeline_output import LEditsPPDiffusionPipelineOutput, LEditsPPInversionPipelineOutput logger = logging.get_logger(__name__) # pylint: disable=invalid-name EXAMPLE_DOC_STRING = """ Examples: ```py >>> import PIL >>> import requests >>> import torch >>> from io import BytesIO >>> from diffusers import LEditsPPPipelineStableDiffusion >>> from diffusers.utils import load_image >>> pipe = LEditsPPPipelineStableDiffusion.from_pretrained( ... "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16 ... ) >>> pipe = pipe.to("cuda") >>> img_url = "https://www.aiml.informatik.tu-darmstadt.de/people/mbrack/cherry_blossom.png" >>> image = load_image(img_url).convert("RGB") >>> _ = pipe.invert(image=image, num_inversion_steps=50, skip=0.1) >>> edited_image = pipe( ... editing_prompt=["cherry blossom"], edit_guidance_scale=10.0, edit_threshold=0.75 ... 
).images[0] ``` """ # Modified from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionAttendAndExcitePipeline.AttentionStore class LeditsAttentionStore: @staticmethod def get_empty_store(): return {"down_cross": [], "mid_cross": [], "up_cross": [], "down_self": [], "mid_self": [], "up_self": []} def __call__(self, attn, is_cross: bool, place_in_unet: str, editing_prompts, PnP=False): # attn.shape = batch_size * head_size, seq_len query, seq_len_key if attn.shape[1] <= self.max_size: bs = 1 + int(PnP) + editing_prompts skip = 2 if PnP else 1 # skip PnP & unconditional attn = torch.stack(attn.split(self.batch_size)).permute(1, 0, 2, 3) source_batch_size = int(attn.shape[1] // bs) self.forward(attn[:, skip * source_batch_size :], is_cross, place_in_unet) def forward(self, attn, is_cross: bool, place_in_unet: str): key = f"{place_in_unet}_{'cross' if is_cross else 'self'}" self.step_store[key].append(attn) def between_steps(self, store_step=True): if store_step: if self.average: if len(self.attention_store) == 0: self.attention_store = self.step_store else: for key in self.attention_store: for i in range(len(self.attention_store[key])): self.attention_store[key][i] += self.step_store[key][i] else: if len(self.attention_store) == 0: self.attention_store = [self.step_store] else: self.attention_store.append(self.step_store) self.cur_step += 1 self.step_store = self.get_empty_store() def get_attention(self, step: int): if self.average: attention = { key: [item / self.cur_step for item in self.attention_store[key]] for key in self.attention_store } else: assert step is not None attention = self.attention_store[step] return attention def aggregate_attention( self, attention_maps, prompts, res: Union[int, Tuple[int]], from_where: List[str], is_cross: bool, select: int ): out = [[] for x in range(self.batch_size)] if isinstance(res, int): num_pixels = res**2 resolution = (res, res) else: num_pixels = res[0] * res[1] resolution = res[:2] for location in from_where: for bs_item in attention_maps[f"{location}_{'cross' if is_cross else 'self'}"]: for batch, item in enumerate(bs_item): if item.shape[1] == num_pixels: cross_maps = item.reshape(len(prompts), -1, *resolution, item.shape[-1])[select] out[batch].append(cross_maps) out = torch.stack([torch.cat(x, dim=0) for x in out]) # average over heads out = out.sum(1) / out.shape[1] return out def __init__(self, average: bool, batch_size=1, max_resolution=16, max_size: int = None): self.step_store = self.get_empty_store() self.attention_store = [] self.cur_step = 0 self.average = average self.batch_size = batch_size if max_size is None: self.max_size = max_resolution**2 elif max_size is not None and max_resolution is None: self.max_size = max_size else: raise ValueError("Only allowed to set one of max_resolution or max_size") # Modified from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionAttendAndExcitePipeline.GaussianSmoothing class LeditsGaussianSmoothing: def __init__(self, device): kernel_size = [3, 3] sigma = [0.5, 0.5] # The gaussian kernel is the product of the gaussian function of each dimension. kernel = 1 meshgrids = torch.meshgrid([torch.arange(size, dtype=torch.float32) for size in kernel_size]) for size, std, mgrid in zip(kernel_size, sigma, meshgrids): mean = (size - 1) / 2 kernel *= 1 / (std * math.sqrt(2 * math.pi)) * torch.exp(-(((mgrid - mean) / (2 * std)) ** 2)) # Make sure sum of values in gaussian kernel equals 1. 
kernel = kernel / torch.sum(kernel) # Reshape to depthwise convolutional weight kernel = kernel.view(1, 1, *kernel.size()) kernel = kernel.repeat(1, *[1] * (kernel.dim() - 1)) self.weight = kernel.to(device) def __call__(self, input): """ Arguments: Apply gaussian filter to input. input (torch.Tensor): Input to apply gaussian filter on. Returns: filtered (torch.Tensor): Filtered output. """ return F.conv2d(input, weight=self.weight.to(input.dtype)) class LEDITSCrossAttnProcessor: def __init__(self, attention_store, place_in_unet, pnp, editing_prompts): self.attnstore = attention_store self.place_in_unet = place_in_unet self.editing_prompts = editing_prompts self.pnp = pnp def __call__( self, attn: Attention, hidden_states, encoder_hidden_states, attention_mask=None, temb=None, ): batch_size, sequence_length, _ = ( hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape ) attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) query = attn.to_q(hidden_states) if encoder_hidden_states is None: encoder_hidden_states = hidden_states elif attn.norm_cross: encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) key = attn.to_k(encoder_hidden_states) value = attn.to_v(encoder_hidden_states) query = attn.head_to_batch_dim(query) key = attn.head_to_batch_dim(key) value = attn.head_to_batch_dim(value) attention_probs = attn.get_attention_scores(query, key, attention_mask) self.attnstore( attention_probs, is_cross=True, place_in_unet=self.place_in_unet, editing_prompts=self.editing_prompts, PnP=self.pnp, ) hidden_states = torch.bmm(attention_probs, value) hidden_states = attn.batch_to_head_dim(hidden_states) # linear proj hidden_states = attn.to_out[0](hidden_states) # dropout hidden_states = attn.to_out[1](hidden_states) hidden_states = hidden_states / attn.rescale_output_factor return hidden_states # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.rescale_noise_cfg def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0): """ Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4 """ std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True) std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True) # rescale the results from guidance (fixes overexposure) noise_pred_rescaled = noise_cfg * (std_text / std_cfg) # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg return noise_cfg class LEditsPPPipelineStableDiffusion( DiffusionPipeline, TextualInversionLoaderMixin, StableDiffusionLoraLoaderMixin, IPAdapterMixin, FromSingleFileMixin ): """ Pipeline for textual image editing using LEDits++ with Stable Diffusion. This model inherits from [`DiffusionPipeline`] and builds on the [`StableDiffusionPipeline`]. Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.). Args: vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. text_encoder ([`~transformers.CLIPTextModel`]): Frozen text-encoder. 
Stable Diffusion uses the text portion of [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. tokenizer ([`~transformers.CLIPTokenizer`]): Tokenizer of class [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. scheduler ([`DPMSolverMultistepScheduler`] or [`DDIMScheduler`]): A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of [`DPMSolverMultistepScheduler`] or [`DDIMScheduler`]. If any other scheduler is passed it will automatically be set to [`DPMSolverMultistepScheduler`]. safety_checker ([`StableDiffusionSafetyChecker`]): Classification module that estimates whether generated images could be considered offensive or harmful. Please refer to the [model card](https://huggingface.co/CompVis/stable-diffusion-v1-4) for details. feature_extractor ([`~transformers.CLIPImageProcessor`]): Model that extracts features from generated images to be used as inputs for the `safety_checker`. """ model_cpu_offload_seq = "text_encoder->unet->vae" _exclude_from_cpu_offload = ["safety_checker"] _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"] _optional_components = ["safety_checker", "feature_extractor", "image_encoder"] def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: Union[DDIMScheduler, DPMSolverMultistepScheduler], safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, requires_safety_checker: bool = True, ): super().__init__() if not isinstance(scheduler, DDIMScheduler) and not isinstance(scheduler, DPMSolverMultistepScheduler): scheduler = DPMSolverMultistepScheduler.from_config( scheduler.config, algorithm_type="sde-dpmsolver++", solver_order=2 ) logger.warning( "This pipeline only supports DDIMScheduler and DPMSolverMultistepScheduler. " "The scheduler has been changed to DPMSolverMultistepScheduler." ) if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1: deprecation_message = ( f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`" f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure " "to update the config accordingly as leaving `steps_offset` might lead to incorrect results" " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub," " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`" " file" ) deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config["steps_offset"] = 1 scheduler._internal_dict = FrozenDict(new_config) if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True: deprecation_message = ( f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`." " `clip_sample` should be set to False in the configuration file. Please make sure to update the" " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in" " future versions. 
If you have downloaded this checkpoint from the Hugging Face Hub, it would be very" " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file" ) deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config["clip_sample"] = False scheduler._internal_dict = FrozenDict(new_config) if safety_checker is None and requires_safety_checker: logger.warning( f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" " results in services or applications open to the public. Both the diffusers team and Hugging Face" " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" " it only for use-cases that involve analyzing network behavior or auditing its results. For more" " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." ) if safety_checker is not None and feature_extractor is None: raise ValueError( "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." ) is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse( version.parse(unet.config._diffusers_version).base_version ) < version.parse("0.9.0.dev0") is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64 if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64: deprecation_message = ( "The configuration file of the unet has set the default `sample_size` to smaller than" " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the" " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-" " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5" " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the" " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`" " in the config might lead to incorrect results in future versions. 
If you have downloaded this" " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for" " the `unet/config.json` file" ) deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(unet.config) new_config["sample_size"] = 64 unet._internal_dict = FrozenDict(new_config) self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, ) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) self.register_to_config(requires_safety_checker=requires_safety_checker) self.inversion_steps = None # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker def run_safety_checker(self, image, device, dtype): if self.safety_checker is None: has_nsfw_concept = None else: if torch.is_tensor(image): feature_extractor_input = self.image_processor.postprocess(image, output_type="pil") else: feature_extractor_input = self.image_processor.numpy_to_pil(image) safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device) image, has_nsfw_concept = self.safety_checker( images=image, clip_input=safety_checker_input.pixel_values.to(dtype) ) return image, has_nsfw_concept # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents def decode_latents(self, latents): deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead" deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False) latents = 1 / self.vae.config.scaling_factor * latents image = self.vae.decode(latents, return_dict=False)[0] image = (image / 2 + 0.5).clamp(0, 1) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 image = image.cpu().permute(0, 2, 3, 1).float().numpy() return image # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs def prepare_extra_step_kwargs(self, eta, generator=None): # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs # Modified from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.check_inputs def check_inputs( self, negative_prompt=None, editing_prompt_embeddings=None, negative_prompt_embeds=None, callback_on_step_end_tensor_inputs=None, ): if callback_on_step_end_tensor_inputs is not None and not all( k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs ): raise ValueError( f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" ) if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" f" {negative_prompt_embeds}. Please make sure to only forward one of the two." ) if editing_prompt_embeddings is not None and negative_prompt_embeds is not None: if editing_prompt_embeddings.shape != negative_prompt_embeds.shape: raise ValueError( "`editing_prompt_embeddings` and `negative_prompt_embeds` must have the same shape when passed directly, but" f" got: `editing_prompt_embeddings` {editing_prompt_embeddings.shape} != `negative_prompt_embeds`" f" {negative_prompt_embeds.shape}." ) # Modified from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, latents): # shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor) # if latents.shape != shape: # raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") latents = latents.to(device) # scale the initial noise by the standard deviation required by the scheduler latents = latents * self.scheduler.init_noise_sigma return latents def prepare_unet(self, attention_store, PnP: bool = False): attn_procs = {} for name in self.unet.attn_processors.keys(): if name.startswith("mid_block"): place_in_unet = "mid" elif name.startswith("up_blocks"): place_in_unet = "up" elif name.startswith("down_blocks"): place_in_unet = "down" else: continue if "attn2" in name and place_in_unet != "mid": attn_procs[name] = LEDITSCrossAttnProcessor( attention_store=attention_store, place_in_unet=place_in_unet, pnp=PnP, editing_prompts=self.enabled_editing_prompts, ) else: attn_procs[name] = AttnProcessor() self.unet.set_attn_processor(attn_procs) def encode_prompt( self, device, num_images_per_prompt, enable_edit_guidance, negative_prompt=None, editing_prompt=None, negative_prompt_embeds: Optional[torch.Tensor] = None, editing_prompt_embeds: Optional[torch.Tensor] = None, lora_scale: Optional[float] = None, clip_skip: Optional[int] = None, ): r""" Encodes the prompt into text encoder hidden states. 
Args: device: (`torch.device`): torch device num_images_per_prompt (`int`): number of images that should be generated per prompt enable_edit_guidance (`bool`): whether to perform any editing or reconstruct the input image instead negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). editing_prompt (`str` or `List[str]`, *optional*): Editing prompt(s) to be encoded. If not defined, one has to pass `editing_prompt_embeds` instead. editing_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. lora_scale (`float`, *optional*): A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. clip_skip (`int`, *optional*): Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that the output of the pre-final layer will be used for computing the prompt embeddings. """ # set lora scale so that monkey patched LoRA # function of text encoder can correctly access it if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin): self._lora_scale = lora_scale # dynamically adjust the LoRA scale if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) else: scale_lora_layers(self.text_encoder, lora_scale) batch_size = self.batch_size num_edit_tokens = None if negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [""] * batch_size elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but expected" f" {batch_size} based on the input images. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`."
) else: uncond_tokens = negative_prompt # textual inversion: procecss multi-vector tokens if necessary if isinstance(self, TextualInversionLoaderMixin): uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) uncond_input = self.tokenizer( uncond_tokens, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None negative_prompt_embeds = self.text_encoder( uncond_input.input_ids.to(device), attention_mask=attention_mask, ) negative_prompt_embeds = negative_prompt_embeds[0] if self.text_encoder is not None: prompt_embeds_dtype = self.text_encoder.dtype elif self.unet is not None: prompt_embeds_dtype = self.unet.dtype else: prompt_embeds_dtype = negative_prompt_embeds.dtype negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) if enable_edit_guidance: if editing_prompt_embeds is None: # textual inversion: procecss multi-vector tokens if necessary # if isinstance(self, TextualInversionLoaderMixin): # prompt = self.maybe_convert_prompt(prompt, self.tokenizer) if isinstance(editing_prompt, str): editing_prompt = [editing_prompt] max_length = negative_prompt_embeds.shape[1] text_inputs = self.tokenizer( [x for item in editing_prompt for x in repeat(item, batch_size)], padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", return_length=True, ) num_edit_tokens = text_inputs.length - 2 # not counting startoftext and endoftext text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer( [x for item in editing_prompt for x in repeat(item, batch_size)], padding="longest", return_tensors="pt", ).input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( text_input_ids, untruncated_ids ): removed_text = self.tokenizer.batch_decode( untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] ) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {self.tokenizer.model_max_length} tokens: {removed_text}" ) if ( hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask ): attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None if clip_skip is None: editing_prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) editing_prompt_embeds = editing_prompt_embeds[0] else: editing_prompt_embeds = self.text_encoder( text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True ) # Access the `hidden_states` first, that contains a tuple of # all the hidden states from the encoder layers. Then index into # the tuple to access the hidden states from the desired layer. editing_prompt_embeds = editing_prompt_embeds[-1][-(clip_skip + 1)] # We also need to apply the final LayerNorm here to not mess with the # representations. The `last_hidden_states` that we typically use for # obtaining the final prompt representations passes through the LayerNorm # layer. 
editing_prompt_embeds = self.text_encoder.text_model.final_layer_norm(editing_prompt_embeds) editing_prompt_embeds = editing_prompt_embeds.to(dtype=negative_prompt_embeds.dtype, device=device) bs_embed_edit, seq_len, _ = editing_prompt_embeds.shape editing_prompt_embeds = editing_prompt_embeds.to(dtype=negative_prompt_embeds.dtype, device=device) editing_prompt_embeds = editing_prompt_embeds.repeat(1, num_images_per_prompt, 1) editing_prompt_embeds = editing_prompt_embeds.view(bs_embed_edit * num_images_per_prompt, seq_len, -1) # get unconditional embeddings for classifier free guidance # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND: # Retrieve the original scale by scaling back the LoRA layers unscale_lora_layers(self.text_encoder, lora_scale) return editing_prompt_embeds, negative_prompt_embeds, num_edit_tokens @property def guidance_rescale(self): return self._guidance_rescale @property def clip_skip(self): return self._clip_skip @property def cross_attention_kwargs(self): return self._cross_attention_kwargs @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__( self, negative_prompt: Optional[Union[str, List[str]]] = None, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, output_type: Optional[str] = "pil", return_dict: bool = True, editing_prompt: Optional[Union[str, List[str]]] = None, editing_prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, reverse_editing_direction: Optional[Union[bool, List[bool]]] = False, edit_guidance_scale: Optional[Union[float, List[float]]] = 5, edit_warmup_steps: Optional[Union[int, List[int]]] = 0, edit_cooldown_steps: Optional[Union[int, List[int]]] = None, edit_threshold: Optional[Union[float, List[float]]] = 0.9, user_mask: Optional[torch.Tensor] = None, sem_guidance: Optional[List[torch.Tensor]] = None, use_cross_attn_mask: bool = False, use_intersect_mask: bool = True, attn_store_steps: Optional[List[int]] = [], store_averaged_over_steps: bool = True, cross_attention_kwargs: Optional[Dict[str, Any]] = None, guidance_rescale: float = 0.0, clip_skip: Optional[int] = None, callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, callback_on_step_end_tensor_inputs: List[str] = ["latents"], **kwargs, ): r""" The call function to the pipeline for editing. The [`~pipelines.ledits_pp.LEditsPPPipelineStableDiffusion.invert`] method has to be called beforehand. Edits will always be performed for the last inverted image(s). Args: negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). generator (`torch.Generator`, *optional*): One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. 
return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.ledits_pp.LEditsPPDiffusionPipelineOutput`] instead of a plain tuple. editing_prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide the image generation. The image is reconstructed by setting `editing_prompt = None`. Guidance direction of prompt should be specified via `reverse_editing_direction`. editing_prompt_embeds (`torch.Tensor`, *optional*): Pre-computed embeddings to use for guiding the image generation. Guidance direction of embedding should be specified via `reverse_editing_direction`. negative_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. reverse_editing_direction (`bool` or `List[bool]`, *optional*, defaults to `False`): Whether the corresponding prompt in `editing_prompt` should be increased or decreased. edit_guidance_scale (`float` or `List[float]`, *optional*, defaults to 5): Guidance scale for guiding the image generation. If provided as a list, values should correspond to `editing_prompt`. `edit_guidance_scale` is defined as `s_e` of equation 12 of [LEDITS++ Paper](https://arxiv.org/abs/2301.12247). edit_warmup_steps (`float` or `List[float]`, *optional*, defaults to 0): Number of diffusion steps (for each prompt) for which guidance will not be applied. edit_cooldown_steps (`float` or `List[float]`, *optional*, defaults to `None`): Number of diffusion steps (for each prompt) after which guidance will no longer be applied. edit_threshold (`float` or `List[float]`, *optional*, defaults to 0.9): Masking threshold of guidance. Threshold should be proportional to the image region that is modified. 'edit_threshold' is defined as 'λ' of equation 12 of [LEDITS++ Paper](https://arxiv.org/abs/2301.12247). user_mask (`torch.Tensor`, *optional*): User-provided mask for even better control over the editing process. This is helpful when LEDITS++'s implicit masks do not meet user preferences. sem_guidance (`List[torch.Tensor]`, *optional*): List of pre-generated guidance vectors to be applied at generation. Length of the list has to correspond to `num_inference_steps`. use_cross_attn_mask (`bool`, defaults to `False`): Whether cross-attention masks are used. Cross-attention masks are always used when use_intersect_mask is set to true. Cross-attention masks are defined as 'M^1' of equation 12 of [LEDITS++ paper](https://arxiv.org/pdf/2311.16711.pdf). use_intersect_mask (`bool`, defaults to `True`): Whether the masking term is calculated as intersection of cross-attention masks and masks derived from the noise estimate. Cross-attention masks are defined as 'M^1' and masks derived from the noise estimate are defined as 'M^2' of equation 12 of [LEDITS++ paper](https://arxiv.org/pdf/2311.16711.pdf). attn_store_steps (`List[int]`, *optional*): Steps for which the attention maps are stored in the AttentionStore. Just for visualization purposes. store_averaged_over_steps (`bool`, defaults to `True`): Whether the attention maps for the 'attn_store_steps' are stored averaged over the diffusion steps. If False, attention maps for each step are stored separately. Just for visualization purposes. 
cross_attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). guidance_rescale (`float`, *optional*, defaults to 0.0): Guidance rescale factor from [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). Guidance rescale factor should fix overexposure when using zero terminal SNR. clip_skip (`int`, *optional*): Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that the output of the pre-final layer will be used for computing the prompt embeddings. callback_on_step_end (`Callable`, *optional*): A function that calls at the end of each denoising steps during the inference. The function is called with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by `callback_on_step_end_tensor_inputs`. callback_on_step_end_tensor_inputs (`List`, *optional*): The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the `._callback_tensor_inputs` attribute of your pipeline class. Examples: Returns: [`~pipelines.ledits_pp.LEditsPPDiffusionPipelineOutput`] or `tuple`: [`~pipelines.ledits_pp.LEditsPPDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple. When returning a tuple, the first element is a list with the generated images, and the second element is a list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work" (nsfw) content, according to the `safety_checker`. """ if self.inversion_steps is None: raise ValueError( "You need to invert an input image first before calling the pipeline. The `invert` method has to be called beforehand. Edits will always be performed for the last inverted image(s)." ) eta = self.eta num_images_per_prompt = 1 latents = self.init_latents zs = self.zs self.scheduler.set_timesteps(len(self.scheduler.timesteps)) if use_intersect_mask: use_cross_attn_mask = True if use_cross_attn_mask: self.smoothing = LeditsGaussianSmoothing(self.device) if user_mask is not None: user_mask = user_mask.to(self.device) org_prompt = "" # 1. Check inputs. Raise error if not correct self.check_inputs( negative_prompt, editing_prompt_embeds, negative_prompt_embeds, callback_on_step_end_tensor_inputs, ) self._guidance_rescale = guidance_rescale self._clip_skip = clip_skip self._cross_attention_kwargs = cross_attention_kwargs # 2. Define call parameters batch_size = self.batch_size if editing_prompt: enable_edit_guidance = True if isinstance(editing_prompt, str): editing_prompt = [editing_prompt] self.enabled_editing_prompts = len(editing_prompt) elif editing_prompt_embeds is not None: enable_edit_guidance = True self.enabled_editing_prompts = editing_prompt_embeds.shape[0] else: self.enabled_editing_prompts = 0 enable_edit_guidance = False # 3. 
Encode input prompt lora_scale = ( self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None ) edit_concepts, uncond_embeddings, num_edit_tokens = self.encode_prompt( editing_prompt=editing_prompt, device=self.device, num_images_per_prompt=num_images_per_prompt, enable_edit_guidance=enable_edit_guidance, negative_prompt=negative_prompt, editing_prompt_embeds=editing_prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, lora_scale=lora_scale, clip_skip=self.clip_skip, ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes if enable_edit_guidance: text_embeddings = torch.cat([uncond_embeddings, edit_concepts]) self.text_cross_attention_maps = [editing_prompt] if isinstance(editing_prompt, str) else editing_prompt else: text_embeddings = torch.cat([uncond_embeddings]) # 4. Prepare timesteps # self.scheduler.set_timesteps(num_inference_steps, device=self.device) timesteps = self.inversion_steps t_to_idx = {int(v): k for k, v in enumerate(timesteps[-zs.shape[0] :])} if use_cross_attn_mask: self.attention_store = LeditsAttentionStore( average=store_averaged_over_steps, batch_size=batch_size, max_size=(latents.shape[-2] / 4.0) * (latents.shape[-1] / 4.0), max_resolution=None, ) self.prepare_unet(self.attention_store, PnP=False) resolution = latents.shape[-2:] att_res = (int(resolution[0] / 4), int(resolution[1] / 4)) # 5. Prepare latent variables num_channels_latents = self.unet.config.in_channels latents = self.prepare_latents( batch_size * num_images_per_prompt, num_channels_latents, None, None, text_embeddings.dtype, self.device, latents, ) # 6. Prepare extra step kwargs. extra_step_kwargs = self.prepare_extra_step_kwargs(eta) self.sem_guidance = None self.activation_mask = None # 7. 
Denoising loop num_warmup_steps = 0 with self.progress_bar(total=len(timesteps)) as progress_bar: for i, t in enumerate(timesteps): # expand the latents if we are doing classifier free guidance if enable_edit_guidance: latent_model_input = torch.cat([latents] * (1 + self.enabled_editing_prompts)) else: latent_model_input = latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) text_embed_input = text_embeddings # predict the noise residual noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embed_input).sample noise_pred_out = noise_pred.chunk(1 + self.enabled_editing_prompts) # [b,4, 64, 64] noise_pred_uncond = noise_pred_out[0] noise_pred_edit_concepts = noise_pred_out[1:] noise_guidance_edit = torch.zeros( noise_pred_uncond.shape, device=self.device, dtype=noise_pred_uncond.dtype, ) if sem_guidance is not None and len(sem_guidance) > i: noise_guidance_edit += sem_guidance[i].to(self.device) elif enable_edit_guidance: if self.activation_mask is None: self.activation_mask = torch.zeros( (len(timesteps), len(noise_pred_edit_concepts), *noise_pred_edit_concepts[0].shape) ) if self.sem_guidance is None: self.sem_guidance = torch.zeros((len(timesteps), *noise_pred_uncond.shape)) for c, noise_pred_edit_concept in enumerate(noise_pred_edit_concepts): if isinstance(edit_warmup_steps, list): edit_warmup_steps_c = edit_warmup_steps[c] else: edit_warmup_steps_c = edit_warmup_steps if i < edit_warmup_steps_c: continue if isinstance(edit_guidance_scale, list): edit_guidance_scale_c = edit_guidance_scale[c] else: edit_guidance_scale_c = edit_guidance_scale if isinstance(edit_threshold, list): edit_threshold_c = edit_threshold[c] else: edit_threshold_c = edit_threshold if isinstance(reverse_editing_direction, list): reverse_editing_direction_c = reverse_editing_direction[c] else: reverse_editing_direction_c = reverse_editing_direction if isinstance(edit_cooldown_steps, list): edit_cooldown_steps_c = edit_cooldown_steps[c] elif edit_cooldown_steps is None: edit_cooldown_steps_c = i + 1 else: edit_cooldown_steps_c = edit_cooldown_steps if i >= edit_cooldown_steps_c: continue noise_guidance_edit_tmp = noise_pred_edit_concept - noise_pred_uncond if reverse_editing_direction_c: noise_guidance_edit_tmp = noise_guidance_edit_tmp * -1 noise_guidance_edit_tmp = noise_guidance_edit_tmp * edit_guidance_scale_c if user_mask is not None: noise_guidance_edit_tmp = noise_guidance_edit_tmp * user_mask if use_cross_attn_mask: out = self.attention_store.aggregate_attention( attention_maps=self.attention_store.step_store, prompts=self.text_cross_attention_maps, res=att_res, from_where=["up", "down"], is_cross=True, select=self.text_cross_attention_maps.index(editing_prompt[c]), ) attn_map = out[:, :, :, 1 : 1 + num_edit_tokens[c]] # 0 -> startoftext # average over all tokens if attn_map.shape[3] != num_edit_tokens[c]: raise ValueError( f"Incorrect shape of attention_map. Expected size {num_edit_tokens[c]}, but found {attn_map.shape[3]}!" 
) attn_map = torch.sum(attn_map, dim=3) # gaussian_smoothing attn_map = F.pad(attn_map.unsqueeze(1), (1, 1, 1, 1), mode="reflect") attn_map = self.smoothing(attn_map).squeeze(1) # torch.quantile function expects float32 if attn_map.dtype == torch.float32: tmp = torch.quantile(attn_map.flatten(start_dim=1), edit_threshold_c, dim=1) else: tmp = torch.quantile( attn_map.flatten(start_dim=1).to(torch.float32), edit_threshold_c, dim=1 ).to(attn_map.dtype) attn_mask = torch.where( attn_map >= tmp.unsqueeze(1).unsqueeze(1).repeat(1, *att_res), 1.0, 0.0 ) # resolution must match latent space dimension attn_mask = F.interpolate( attn_mask.unsqueeze(1), noise_guidance_edit_tmp.shape[-2:], # 64,64 ).repeat(1, 4, 1, 1) self.activation_mask[i, c] = attn_mask.detach().cpu() if not use_intersect_mask: noise_guidance_edit_tmp = noise_guidance_edit_tmp * attn_mask if use_intersect_mask: if t <= 800: noise_guidance_edit_tmp_quantile = torch.abs(noise_guidance_edit_tmp) noise_guidance_edit_tmp_quantile = torch.sum( noise_guidance_edit_tmp_quantile, dim=1, keepdim=True ) noise_guidance_edit_tmp_quantile = noise_guidance_edit_tmp_quantile.repeat( 1, self.unet.config.in_channels, 1, 1 ) # torch.quantile function expects float32 if noise_guidance_edit_tmp_quantile.dtype == torch.float32: tmp = torch.quantile( noise_guidance_edit_tmp_quantile.flatten(start_dim=2), edit_threshold_c, dim=2, keepdim=False, ) else: tmp = torch.quantile( noise_guidance_edit_tmp_quantile.flatten(start_dim=2).to(torch.float32), edit_threshold_c, dim=2, keepdim=False, ).to(noise_guidance_edit_tmp_quantile.dtype) intersect_mask = ( torch.where( noise_guidance_edit_tmp_quantile >= tmp[:, :, None, None], torch.ones_like(noise_guidance_edit_tmp), torch.zeros_like(noise_guidance_edit_tmp), ) * attn_mask ) self.activation_mask[i, c] = intersect_mask.detach().cpu() noise_guidance_edit_tmp = noise_guidance_edit_tmp * intersect_mask else: # print(f"only attention mask for step {i}") noise_guidance_edit_tmp = noise_guidance_edit_tmp * attn_mask elif not use_cross_attn_mask: # calculate quantile noise_guidance_edit_tmp_quantile = torch.abs(noise_guidance_edit_tmp) noise_guidance_edit_tmp_quantile = torch.sum( noise_guidance_edit_tmp_quantile, dim=1, keepdim=True ) noise_guidance_edit_tmp_quantile = noise_guidance_edit_tmp_quantile.repeat(1, 4, 1, 1) # torch.quantile function expects float32 if noise_guidance_edit_tmp_quantile.dtype == torch.float32: tmp = torch.quantile( noise_guidance_edit_tmp_quantile.flatten(start_dim=2), edit_threshold_c, dim=2, keepdim=False, ) else: tmp = torch.quantile( noise_guidance_edit_tmp_quantile.flatten(start_dim=2).to(torch.float32), edit_threshold_c, dim=2, keepdim=False, ).to(noise_guidance_edit_tmp_quantile.dtype) self.activation_mask[i, c] = ( torch.where( noise_guidance_edit_tmp_quantile >= tmp[:, :, None, None], torch.ones_like(noise_guidance_edit_tmp), torch.zeros_like(noise_guidance_edit_tmp), ) .detach() .cpu() ) noise_guidance_edit_tmp = torch.where( noise_guidance_edit_tmp_quantile >= tmp[:, :, None, None], noise_guidance_edit_tmp, torch.zeros_like(noise_guidance_edit_tmp), ) noise_guidance_edit += noise_guidance_edit_tmp self.sem_guidance[i] = noise_guidance_edit.detach().cpu() noise_pred = noise_pred_uncond + noise_guidance_edit if enable_edit_guidance and self.guidance_rescale > 0.0: # Based on 3.4. 
in https://arxiv.org/pdf/2305.08891.pdf noise_pred = rescale_noise_cfg( noise_pred, noise_pred_edit_concepts.mean(dim=0, keepdim=False), guidance_rescale=self.guidance_rescale, ) idx = t_to_idx[int(t)] latents = self.scheduler.step( noise_pred, t, latents, variance_noise=zs[idx], **extra_step_kwargs ).prev_sample # step callback if use_cross_attn_mask: store_step = i in attn_store_steps self.attention_store.between_steps(store_step) if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop("latents", latents) # prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) # call the callback, if provided if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() # 8. Post-processing if not output_type == "latent": image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False, generator=generator)[ 0 ] image, has_nsfw_concept = self.run_safety_checker(image, self.device, text_embeddings.dtype) else: image = latents has_nsfw_concept = None if has_nsfw_concept is None: do_denormalize = [True] * image.shape[0] else: do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) # Offload all models self.maybe_free_model_hooks() if not return_dict: return (image, has_nsfw_concept) return LEditsPPDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) @torch.no_grad() def invert( self, image: PipelineImageInput, source_prompt: str = "", source_guidance_scale: float = 3.5, num_inversion_steps: int = 30, skip: float = 0.15, generator: Optional[torch.Generator] = None, cross_attention_kwargs: Optional[Dict[str, Any]] = None, clip_skip: Optional[int] = None, height: Optional[int] = None, width: Optional[int] = None, resize_mode: Optional[str] = "default", crops_coords: Optional[Tuple[int, int, int, int]] = None, ): r""" The function to the pipeline for image inversion as described by the [LEDITS++ Paper](https://arxiv.org/abs/2301.12247). If the scheduler is set to [`~schedulers.DDIMScheduler`] the inversion proposed by [edit-friendly DPDM](https://arxiv.org/abs/2304.06140) will be performed instead. Args: image (`PipelineImageInput`): Input for the image(s) that are to be edited. Multiple input images have to default to the same aspect ratio. source_prompt (`str`, defaults to `""`): Prompt describing the input image that will be used for guidance during inversion. Guidance is disabled if the `source_prompt` is `""`. source_guidance_scale (`float`, defaults to `3.5`): Strength of guidance during inversion. num_inversion_steps (`int`, defaults to `30`): Number of total performed inversion steps after discarding the initial `skip` steps. skip (`float`, defaults to `0.15`): Portion of initial steps that will be ignored for inversion and subsequent generation. Lower values will lead to stronger changes to the input image. `skip` has to be between `0` and `1`. generator (`torch.Generator`, *optional*): A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make inversion deterministic. 
cross_attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). clip_skip (`int`, *optional*): Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that the output of the pre-final layer will be used for computing the prompt embeddings. height (`int`, *optional*, defaults to `None`): The height of the preprocessed image. If `None`, will use `get_default_height_width()` to get the default height. width (`int`, *optional*, defaults to `None`): The width of the preprocessed image. If `None`, will use `get_default_height_width()` to get the default width. resize_mode (`str`, *optional*, defaults to `default`): The resize mode, can be one of `default` or `fill`. If `default`, will resize the image to fit within the specified width and height, and it may not maintain the original aspect ratio. If `fill`, will resize the image to fit within the specified width and height, maintaining the aspect ratio, and then center the image within the dimensions, filling empty space with data from the image. If `crop`, will resize the image to fit within the specified width and height, maintaining the aspect ratio, and then center the image within the dimensions, cropping the excess. Note that resize_mode `fill` and `crop` are only supported for PIL image input. crops_coords (`List[Tuple[int, int, int, int]]`, *optional*, defaults to `None`): The crop coordinates for each image in the batch. If `None`, will not crop the image. Returns: [`~pipelines.ledits_pp.LEditsPPInversionPipelineOutput`]: Output will contain the resized input image(s) and respective VAE reconstruction(s). """ # Reset attn processor, we do not want to store attn maps during inversion self.unet.set_attn_processor(AttnProcessor()) self.eta = 1.0 self.scheduler.config.timestep_spacing = "leading" self.scheduler.set_timesteps(int(num_inversion_steps * (1 + skip))) self.inversion_steps = self.scheduler.timesteps[-num_inversion_steps:] timesteps = self.inversion_steps # 1. encode image x0, resized = self.encode_image( image, dtype=self.text_encoder.dtype, height=height, width=width, resize_mode=resize_mode, crops_coords=crops_coords, ) self.batch_size = x0.shape[0] # autoencoder reconstruction image_rec = self.vae.decode(x0 / self.vae.config.scaling_factor, return_dict=False, generator=generator)[0] image_rec = self.image_processor.postprocess(image_rec, output_type="pil") # 2. get embeddings do_classifier_free_guidance = source_guidance_scale > 1.0 lora_scale = cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None uncond_embedding, text_embeddings, _ = self.encode_prompt( num_images_per_prompt=1, device=self.device, negative_prompt=None, enable_edit_guidance=do_classifier_free_guidance, editing_prompt=source_prompt, lora_scale=lora_scale, clip_skip=clip_skip, ) # 3. 
find zs and xts variance_noise_shape = (num_inversion_steps, *x0.shape) # intermediate latents t_to_idx = {int(v): k for k, v in enumerate(timesteps)} xts = torch.zeros(size=variance_noise_shape, device=self.device, dtype=uncond_embedding.dtype) for t in reversed(timesteps): idx = num_inversion_steps - t_to_idx[int(t)] - 1 noise = randn_tensor(shape=x0.shape, generator=generator, device=self.device, dtype=x0.dtype) xts[idx] = self.scheduler.add_noise(x0, noise, torch.Tensor([t])) xts = torch.cat([x0.unsqueeze(0), xts], dim=0) self.scheduler.set_timesteps(len(self.scheduler.timesteps)) # noise maps zs = torch.zeros(size=variance_noise_shape, device=self.device, dtype=uncond_embedding.dtype) with self.progress_bar(total=len(timesteps)) as progress_bar: for t in timesteps: idx = num_inversion_steps - t_to_idx[int(t)] - 1 # 1. predict noise residual xt = xts[idx + 1] noise_pred = self.unet(xt, timestep=t, encoder_hidden_states=uncond_embedding).sample if not source_prompt == "": noise_pred_cond = self.unet(xt, timestep=t, encoder_hidden_states=text_embeddings).sample noise_pred = noise_pred + source_guidance_scale * (noise_pred_cond - noise_pred) xtm1 = xts[idx] z, xtm1_corrected = compute_noise(self.scheduler, xtm1, xt, t, noise_pred, self.eta) zs[idx] = z # correction to avoid error accumulation xts[idx] = xtm1_corrected progress_bar.update() self.init_latents = xts[-1].expand(self.batch_size, -1, -1, -1) zs = zs.flip(0) self.zs = zs return LEditsPPInversionPipelineOutput(images=resized, vae_reconstruction_images=image_rec) @torch.no_grad() def encode_image(self, image, dtype=None, height=None, width=None, resize_mode="default", crops_coords=None): image = self.image_processor.preprocess( image=image, height=height, width=width, resize_mode=resize_mode, crops_coords=crops_coords ) resized = self.image_processor.postprocess(image=image, output_type="pil") if max(image.shape[-2:]) > self.vae.config["sample_size"] * 1.5: logger.warning( "Your input images far exceed the default resolution of the underlying diffusion model. " "The output images may contain severe artifacts! " "Consider down-sampling the input using the `height` and `width` parameters" ) image = image.to(dtype) x0 = self.vae.encode(image.to(self.device)).latent_dist.mode() x0 = x0.to(dtype) x0 = self.vae.config.scaling_factor * x0 return x0, resized def compute_noise_ddim(scheduler, prev_latents, latents, timestep, noise_pred, eta): # 1. get previous step value (=t-1) prev_timestep = timestep - scheduler.config.num_train_timesteps // scheduler.num_inference_steps # 2. compute alphas, betas alpha_prod_t = scheduler.alphas_cumprod[timestep] alpha_prod_t_prev = ( scheduler.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else scheduler.final_alpha_cumprod ) beta_prod_t = 1 - alpha_prod_t # 3. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf pred_original_sample = (latents - beta_prod_t ** (0.5) * noise_pred) / alpha_prod_t ** (0.5) # 4. Clip "predicted x_0" if scheduler.config.clip_sample: pred_original_sample = torch.clamp(pred_original_sample, -1, 1) # 5. compute variance: "sigma_t(η)" -> see formula (16) # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1) variance = scheduler._get_variance(timestep, prev_timestep) std_dev_t = eta * variance ** (0.5) # 6. 
compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** (0.5) * noise_pred # modifed so that updated xtm1 is returned as well (to avoid error accumulation) mu_xt = alpha_prod_t_prev ** (0.5) * pred_original_sample + pred_sample_direction if variance > 0.0: noise = (prev_latents - mu_xt) / (variance ** (0.5) * eta) else: noise = torch.tensor([0.0]).to(latents.device) return noise, mu_xt + (eta * variance**0.5) * noise def compute_noise_sde_dpm_pp_2nd(scheduler, prev_latents, latents, timestep, noise_pred, eta): def first_order_update(model_output, sample): # timestep, prev_timestep, sample): sigma_t, sigma_s = scheduler.sigmas[scheduler.step_index + 1], scheduler.sigmas[scheduler.step_index] alpha_t, sigma_t = scheduler._sigma_to_alpha_sigma_t(sigma_t) alpha_s, sigma_s = scheduler._sigma_to_alpha_sigma_t(sigma_s) lambda_t = torch.log(alpha_t) - torch.log(sigma_t) lambda_s = torch.log(alpha_s) - torch.log(sigma_s) h = lambda_t - lambda_s mu_xt = (sigma_t / sigma_s * torch.exp(-h)) * sample + (alpha_t * (1 - torch.exp(-2.0 * h))) * model_output mu_xt = scheduler.dpm_solver_first_order_update( model_output=model_output, sample=sample, noise=torch.zeros_like(sample) ) sigma = sigma_t * torch.sqrt(1.0 - torch.exp(-2 * h)) if sigma > 0.0: noise = (prev_latents - mu_xt) / sigma else: noise = torch.tensor([0.0]).to(sample.device) prev_sample = mu_xt + sigma * noise return noise, prev_sample def second_order_update(model_output_list, sample): # timestep_list, prev_timestep, sample): sigma_t, sigma_s0, sigma_s1 = ( scheduler.sigmas[scheduler.step_index + 1], scheduler.sigmas[scheduler.step_index], scheduler.sigmas[scheduler.step_index - 1], ) alpha_t, sigma_t = scheduler._sigma_to_alpha_sigma_t(sigma_t) alpha_s0, sigma_s0 = scheduler._sigma_to_alpha_sigma_t(sigma_s0) alpha_s1, sigma_s1 = scheduler._sigma_to_alpha_sigma_t(sigma_s1) lambda_t = torch.log(alpha_t) - torch.log(sigma_t) lambda_s0 = torch.log(alpha_s0) - torch.log(sigma_s0) lambda_s1 = torch.log(alpha_s1) - torch.log(sigma_s1) m0, m1 = model_output_list[-1], model_output_list[-2] h, h_0 = lambda_t - lambda_s0, lambda_s0 - lambda_s1 r0 = h_0 / h D0, D1 = m0, (1.0 / r0) * (m0 - m1) mu_xt = ( (sigma_t / sigma_s0 * torch.exp(-h)) * sample + (alpha_t * (1 - torch.exp(-2.0 * h))) * D0 + 0.5 * (alpha_t * (1 - torch.exp(-2.0 * h))) * D1 ) sigma = sigma_t * torch.sqrt(1.0 - torch.exp(-2 * h)) if sigma > 0.0: noise = (prev_latents - mu_xt) / sigma else: noise = torch.tensor([0.0]).to(sample.device) prev_sample = mu_xt + sigma * noise return noise, prev_sample if scheduler.step_index is None: scheduler._init_step_index(timestep) model_output = scheduler.convert_model_output(model_output=noise_pred, sample=latents) for i in range(scheduler.config.solver_order - 1): scheduler.model_outputs[i] = scheduler.model_outputs[i + 1] scheduler.model_outputs[-1] = model_output if scheduler.lower_order_nums < 1: noise, prev_sample = first_order_update(model_output, latents) else: noise, prev_sample = second_order_update(scheduler.model_outputs, latents) if scheduler.lower_order_nums < scheduler.config.solver_order: scheduler.lower_order_nums += 1 # upon completion increase step index by one scheduler._step_index += 1 return noise, prev_sample def compute_noise(scheduler, *args): if isinstance(scheduler, DDIMScheduler): return compute_noise_ddim(scheduler, *args) elif ( isinstance(scheduler, DPMSolverMultistepScheduler) and scheduler.config.algorithm_type == 
"sde-dpmsolver++" and scheduler.config.solver_order == 2 ): return compute_noise_sde_dpm_pp_2nd(scheduler, *args) else: raise NotImplementedError
diffusers/src/diffusers/pipelines/ledits_pp/pipeline_leditspp_stable_diffusion.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/ledits_pp/pipeline_leditspp_stable_diffusion.py", "repo_id": "diffusers", "token_count": 35604 }
146
# coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import importlib import os import re import warnings from pathlib import Path from typing import Any, Dict, List, Optional, Union import torch from huggingface_hub import ModelCard, model_info from huggingface_hub.utils import validate_hf_hub_args from packaging import version from .. import __version__ from ..utils import ( FLAX_WEIGHTS_NAME, ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, SAFETENSORS_WEIGHTS_NAME, WEIGHTS_NAME, deprecate, get_class_from_dynamic_module, is_accelerate_available, is_peft_available, is_transformers_available, logging, ) from ..utils.torch_utils import is_compiled_module if is_transformers_available(): import transformers from transformers import PreTrainedModel from transformers.utils import FLAX_WEIGHTS_NAME as TRANSFORMERS_FLAX_WEIGHTS_NAME from transformers.utils import SAFE_WEIGHTS_NAME as TRANSFORMERS_SAFE_WEIGHTS_NAME from transformers.utils import WEIGHTS_NAME as TRANSFORMERS_WEIGHTS_NAME if is_accelerate_available(): import accelerate from accelerate import dispatch_model from accelerate.hooks import remove_hook_from_module from accelerate.utils import compute_module_sizes, get_max_memory INDEX_FILE = "diffusion_pytorch_model.bin" CUSTOM_PIPELINE_FILE_NAME = "pipeline.py" DUMMY_MODULES_FOLDER = "diffusers.utils" TRANSFORMERS_DUMMY_MODULES_FOLDER = "transformers.utils" CONNECTED_PIPES_KEYS = ["prior"] logger = logging.get_logger(__name__) LOADABLE_CLASSES = { "diffusers": { "ModelMixin": ["save_pretrained", "from_pretrained"], "SchedulerMixin": ["save_pretrained", "from_pretrained"], "DiffusionPipeline": ["save_pretrained", "from_pretrained"], "OnnxRuntimeModel": ["save_pretrained", "from_pretrained"], }, "transformers": { "PreTrainedTokenizer": ["save_pretrained", "from_pretrained"], "PreTrainedTokenizerFast": ["save_pretrained", "from_pretrained"], "PreTrainedModel": ["save_pretrained", "from_pretrained"], "FeatureExtractionMixin": ["save_pretrained", "from_pretrained"], "ProcessorMixin": ["save_pretrained", "from_pretrained"], "ImageProcessingMixin": ["save_pretrained", "from_pretrained"], }, "onnxruntime.training": { "ORTModule": ["save_pretrained", "from_pretrained"], }, } ALL_IMPORTABLE_CLASSES = {} for library in LOADABLE_CLASSES: ALL_IMPORTABLE_CLASSES.update(LOADABLE_CLASSES[library]) def is_safetensors_compatible(filenames, passed_components=None, folder_names=None) -> bool: """ Checking for safetensors compatibility: - The model is safetensors compatible only if there is a safetensors file for each model component present in filenames. 
Converting default pytorch serialized filenames to safetensors serialized filenames: - For models from the diffusers library, just replace the ".bin" extension with ".safetensors" - For models from the transformers library, the filename changes from "pytorch_model" to "model", and the ".bin" extension is replaced with ".safetensors" """ passed_components = passed_components or [] if folder_names is not None: filenames = {f for f in filenames if os.path.split(f)[0] in folder_names} # extract all components of the pipeline and their associated files components = {} for filename in filenames: if not len(filename.split("/")) == 2: continue component, component_filename = filename.split("/") if component in passed_components: continue components.setdefault(component, []) components[component].append(component_filename) # iterate over all files of a component # check if safetensor files exist for that component # if variant is provided check if the variant of the safetensors exists for component, component_filenames in components.items(): matches = [] for component_filename in component_filenames: filename, extension = os.path.splitext(component_filename) match_exists = extension == ".safetensors" matches.append(match_exists) if not any(matches): return False return True def variant_compatible_siblings(filenames, variant=None) -> Union[List[os.PathLike], str]: weight_names = [ WEIGHTS_NAME, SAFETENSORS_WEIGHTS_NAME, FLAX_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, ONNX_EXTERNAL_WEIGHTS_NAME, ] if is_transformers_available(): weight_names += [TRANSFORMERS_WEIGHTS_NAME, TRANSFORMERS_SAFE_WEIGHTS_NAME, TRANSFORMERS_FLAX_WEIGHTS_NAME] # model_pytorch, diffusion_model_pytorch, ... weight_prefixes = [w.split(".")[0] for w in weight_names] # .bin, .safetensors, ... weight_suffixs = [w.split(".")[-1] for w in weight_names] # -00001-of-00002 transformers_index_format = r"\d{5}-of-\d{5}" if variant is not None: # `diffusion_pytorch_model.fp16.bin` as well as `model.fp16-00001-of-00002.safetensors` variant_file_re = re.compile( rf"({'|'.join(weight_prefixes)})\.({variant}|{variant}-{transformers_index_format})\.({'|'.join(weight_suffixs)})$" ) # `text_encoder/pytorch_model.bin.index.fp16.json` variant_index_re = re.compile( rf"({'|'.join(weight_prefixes)})\.({'|'.join(weight_suffixs)})\.index\.{variant}\.json$" ) # `diffusion_pytorch_model.bin` as well as `model-00001-of-00002.safetensors` non_variant_file_re = re.compile( rf"({'|'.join(weight_prefixes)})(-{transformers_index_format})?\.({'|'.join(weight_suffixs)})$" ) # `text_encoder/pytorch_model.bin.index.json` non_variant_index_re = re.compile(rf"({'|'.join(weight_prefixes)})\.({'|'.join(weight_suffixs)})\.index\.json") if variant is not None: variant_weights = {f for f in filenames if variant_file_re.match(f.split("/")[-1]) is not None} variant_indexes = {f for f in filenames if variant_index_re.match(f.split("/")[-1]) is not None} variant_filenames = variant_weights | variant_indexes else: variant_filenames = set() non_variant_weights = {f for f in filenames if non_variant_file_re.match(f.split("/")[-1]) is not None} non_variant_indexes = {f for f in filenames if non_variant_index_re.match(f.split("/")[-1]) is not None} non_variant_filenames = non_variant_weights | non_variant_indexes # all variant filenames will be used by default usable_filenames = set(variant_filenames) def convert_to_variant(filename): if "index" in filename: variant_filename = filename.replace("index", f"index.{variant}") elif 
re.compile(f"^(.*?){transformers_index_format}").match(filename) is not None: variant_filename = f"{filename.split('-')[0]}.{variant}-{'-'.join(filename.split('-')[1:])}" else: variant_filename = f"{filename.split('.')[0]}.{variant}.{filename.split('.')[1]}" return variant_filename for f in non_variant_filenames: variant_filename = convert_to_variant(f) if variant_filename not in usable_filenames: usable_filenames.add(f) return usable_filenames, variant_filenames @validate_hf_hub_args def warn_deprecated_model_variant(pretrained_model_name_or_path, token, variant, revision, model_filenames): info = model_info( pretrained_model_name_or_path, token=token, revision=None, ) filenames = {sibling.rfilename for sibling in info.siblings} comp_model_filenames, _ = variant_compatible_siblings(filenames, variant=revision) comp_model_filenames = [".".join(f.split(".")[:1] + f.split(".")[2:]) for f in comp_model_filenames] if set(model_filenames).issubset(set(comp_model_filenames)): warnings.warn( f"You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` even though you can load it via `variant=`{revision}`. Loading model variants via `revision='{revision}'` is deprecated and will be removed in diffusers v1. Please use `variant='{revision}'` instead.", FutureWarning, ) else: warnings.warn( f"You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have the required variant filenames in the 'main' branch. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {revision} files' so that the correct variant file can be added.", FutureWarning, ) def _unwrap_model(model): """Unwraps a model.""" if is_compiled_module(model): model = model._orig_mod if is_peft_available(): from peft import PeftModel if isinstance(model, PeftModel): model = model.base_model.model return model def maybe_raise_or_warn( library_name, library, class_name, importable_classes, passed_class_obj, name, is_pipeline_module ): """Simple helper method to raise or warn in case incorrect module has been passed""" if not is_pipeline_module: library = importlib.import_module(library_name) class_obj = getattr(library, class_name) class_candidates = {c: getattr(library, c, None) for c in importable_classes.keys()} expected_class_obj = None for class_name, class_candidate in class_candidates.items(): if class_candidate is not None and issubclass(class_obj, class_candidate): expected_class_obj = class_candidate # Dynamo wraps the original model in a private class. # I didn't find a public API to get the original class. sub_model = passed_class_obj[name] unwrapped_sub_model = _unwrap_model(sub_model) model_cls = unwrapped_sub_model.__class__ if not issubclass(model_cls, expected_class_obj): raise ValueError( f"{passed_class_obj[name]} is of type: {model_cls}, but should be" f" {expected_class_obj}" ) else: logger.warning( f"You have passed a non-standard module {passed_class_obj[name]}. 
We cannot verify whether it" " has the correct type" ) def get_class_obj_and_candidates( library_name, class_name, importable_classes, pipelines, is_pipeline_module, component_name=None, cache_dir=None ): """Simple helper method to retrieve class object of module as well as potential parent class objects""" component_folder = os.path.join(cache_dir, component_name) if is_pipeline_module: pipeline_module = getattr(pipelines, library_name) class_obj = getattr(pipeline_module, class_name) class_candidates = {c: class_obj for c in importable_classes.keys()} elif os.path.isfile(os.path.join(component_folder, library_name + ".py")): # load custom component class_obj = get_class_from_dynamic_module( component_folder, module_file=library_name + ".py", class_name=class_name ) class_candidates = {c: class_obj for c in importable_classes.keys()} else: # else we just import it from the library. library = importlib.import_module(library_name) class_obj = getattr(library, class_name) class_candidates = {c: getattr(library, c, None) for c in importable_classes.keys()} return class_obj, class_candidates def _get_custom_pipeline_class( custom_pipeline, repo_id=None, hub_revision=None, class_name=None, cache_dir=None, revision=None, ): if custom_pipeline.endswith(".py"): path = Path(custom_pipeline) # decompose into folder & file file_name = path.name custom_pipeline = path.parent.absolute() elif repo_id is not None: file_name = f"{custom_pipeline}.py" custom_pipeline = repo_id else: file_name = CUSTOM_PIPELINE_FILE_NAME if repo_id is not None and hub_revision is not None: # if we load the pipeline code from the Hub # make sure to overwrite the `revision` revision = hub_revision return get_class_from_dynamic_module( custom_pipeline, module_file=file_name, class_name=class_name, cache_dir=cache_dir, revision=revision, ) def _get_pipeline_class( class_obj, config=None, load_connected_pipeline=False, custom_pipeline=None, repo_id=None, hub_revision=None, class_name=None, cache_dir=None, revision=None, ): if custom_pipeline is not None: return _get_custom_pipeline_class( custom_pipeline, repo_id=repo_id, hub_revision=hub_revision, class_name=class_name, cache_dir=cache_dir, revision=revision, ) if class_obj.__name__ != "DiffusionPipeline": return class_obj diffusers_module = importlib.import_module(class_obj.__module__.split(".")[0]) class_name = class_name or config["_class_name"] if not class_name: raise ValueError( "The class name could not be found in the configuration file. Please make sure to pass the correct `class_name`." ) class_name = class_name[4:] if class_name.startswith("Flax") else class_name pipeline_cls = getattr(diffusers_module, class_name) if load_connected_pipeline: from .auto_pipeline import _get_connected_pipeline connected_pipeline_cls = _get_connected_pipeline(pipeline_cls) if connected_pipeline_cls is not None: logger.info( f"Loading connected pipeline {connected_pipeline_cls.__name__} instead of {pipeline_cls.__name__} as specified via `load_connected_pipeline=True`" ) else: logger.info(f"{pipeline_cls.__name__} has no connected pipeline class. Loading {pipeline_cls.__name__}.") pipeline_cls = connected_pipeline_cls or pipeline_cls return pipeline_cls def _load_empty_model( library_name: str, class_name: str, importable_classes: List[Any], pipelines: Any, is_pipeline_module: bool, name: str, torch_dtype: Union[str, torch.dtype], cached_folder: Union[str, os.PathLike], **kwargs, ): # retrieve class objects. 
class_obj, _ = get_class_obj_and_candidates( library_name, class_name, importable_classes, pipelines, is_pipeline_module, component_name=name, cache_dir=cached_folder, ) if is_transformers_available(): transformers_version = version.parse(version.parse(transformers.__version__).base_version) else: transformers_version = "N/A" # Determine library. is_transformers_model = ( is_transformers_available() and issubclass(class_obj, PreTrainedModel) and transformers_version >= version.parse("4.20.0") ) diffusers_module = importlib.import_module(__name__.split(".")[0]) is_diffusers_model = issubclass(class_obj, diffusers_module.ModelMixin) model = None config_path = cached_folder user_agent = { "diffusers": __version__, "file_type": "model", "framework": "pytorch", } if is_diffusers_model: # Load config and then the model on meta. config, unused_kwargs, commit_hash = class_obj.load_config( os.path.join(config_path, name), cache_dir=cached_folder, return_unused_kwargs=True, return_commit_hash=True, force_download=kwargs.pop("force_download", False), proxies=kwargs.pop("proxies", None), local_files_only=kwargs.pop("local_files_only", False), token=kwargs.pop("token", None), revision=kwargs.pop("revision", None), subfolder=kwargs.pop("subfolder", None), user_agent=user_agent, ) with accelerate.init_empty_weights(): model = class_obj.from_config(config, **unused_kwargs) elif is_transformers_model: config_class = getattr(class_obj, "config_class", None) if config_class is None: raise ValueError("`config_class` cannot be None. Please double-check the model.") config = config_class.from_pretrained( cached_folder, subfolder=name, force_download=kwargs.pop("force_download", False), proxies=kwargs.pop("proxies", None), local_files_only=kwargs.pop("local_files_only", False), token=kwargs.pop("token", None), revision=kwargs.pop("revision", None), user_agent=user_agent, ) with accelerate.init_empty_weights(): model = class_obj(config) if model is not None: model = model.to(dtype=torch_dtype) return model def _assign_components_to_devices( module_sizes: Dict[str, float], device_memory: Dict[str, float], device_mapping_strategy: str = "balanced" ): device_ids = list(device_memory.keys()) device_cycle = device_ids + device_ids[::-1] device_memory = device_memory.copy() device_id_component_mapping = {} current_device_index = 0 for component in module_sizes: device_id = device_cycle[current_device_index % len(device_cycle)] component_memory = module_sizes[component] curr_device_memory = device_memory[device_id] # If the GPU doesn't fit the current component offload to the CPU. if component_memory > curr_device_memory: device_id_component_mapping["cpu"] = [component] else: if device_id not in device_id_component_mapping: device_id_component_mapping[device_id] = [component] else: device_id_component_mapping[device_id].append(component) # Update the device memory. device_memory[device_id] -= component_memory current_device_index += 1 return device_id_component_mapping def _get_final_device_map(device_map, pipeline_class, passed_class_obj, init_dict, library, max_memory, **kwargs): # To avoid circular import problem. from diffusers import pipelines torch_dtype = kwargs.get("torch_dtype", torch.float32) # Load each module in the pipeline on a meta device so that we can derive the device map. 
init_empty_modules = {} for name, (library_name, class_name) in init_dict.items(): if class_name.startswith("Flax"): raise ValueError("Flax pipelines are not supported with `device_map`.") # Define all importable classes is_pipeline_module = hasattr(pipelines, library_name) importable_classes = ALL_IMPORTABLE_CLASSES loaded_sub_model = None # Use passed sub model or load class_name from library_name if name in passed_class_obj: # if the model is in a pipeline module, then we load it from the pipeline # check that passed_class_obj has correct parent class maybe_raise_or_warn( library_name, library, class_name, importable_classes, passed_class_obj, name, is_pipeline_module, ) with accelerate.init_empty_weights(): loaded_sub_model = passed_class_obj[name] else: loaded_sub_model = _load_empty_model( library_name=library_name, class_name=class_name, importable_classes=importable_classes, pipelines=pipelines, is_pipeline_module=is_pipeline_module, pipeline_class=pipeline_class, name=name, torch_dtype=torch_dtype, cached_folder=kwargs.get("cached_folder", None), force_download=kwargs.get("force_download", None), proxies=kwargs.get("proxies", None), local_files_only=kwargs.get("local_files_only", None), token=kwargs.get("token", None), revision=kwargs.get("revision", None), ) if loaded_sub_model is not None: init_empty_modules[name] = loaded_sub_model # determine device map # Obtain a sorted dictionary for mapping the model-level components # to their sizes. module_sizes = { module_name: compute_module_sizes(module, dtype=torch_dtype)[""] for module_name, module in init_empty_modules.items() if isinstance(module, torch.nn.Module) } module_sizes = dict(sorted(module_sizes.items(), key=lambda item: item[1], reverse=True)) # Obtain maximum memory available per device (GPUs only). max_memory = get_max_memory(max_memory) max_memory = dict(sorted(max_memory.items(), key=lambda item: item[1], reverse=True)) max_memory = {k: v for k, v in max_memory.items() if k != "cpu"} # Obtain a dictionary mapping the model-level components to the available # devices based on the maximum memory and the model sizes. 
final_device_map = None if len(max_memory) > 0: device_id_component_mapping = _assign_components_to_devices( module_sizes, max_memory, device_mapping_strategy=device_map ) # Obtain the final device map, e.g., `{"unet": 0, "text_encoder": 1, "vae": 1, ...}` final_device_map = {} for device_id, components in device_id_component_mapping.items(): for component in components: final_device_map[component] = device_id return final_device_map def load_sub_model( library_name: str, class_name: str, importable_classes: List[Any], pipelines: Any, is_pipeline_module: bool, pipeline_class: Any, torch_dtype: torch.dtype, provider: Any, sess_options: Any, device_map: Optional[Union[Dict[str, torch.device], str]], max_memory: Optional[Dict[Union[int, str], Union[int, str]]], offload_folder: Optional[Union[str, os.PathLike]], offload_state_dict: bool, model_variants: Dict[str, str], name: str, from_flax: bool, variant: str, low_cpu_mem_usage: bool, cached_folder: Union[str, os.PathLike], ): """Helper method to load the module `name` from `library_name` and `class_name`""" # retrieve class candidates class_obj, class_candidates = get_class_obj_and_candidates( library_name, class_name, importable_classes, pipelines, is_pipeline_module, component_name=name, cache_dir=cached_folder, ) load_method_name = None # retrieve load method name for class_name, class_candidate in class_candidates.items(): if class_candidate is not None and issubclass(class_obj, class_candidate): load_method_name = importable_classes[class_name][1] # if load method name is None, then we have a dummy module -> raise Error if load_method_name is None: none_module = class_obj.__module__ is_dummy_path = none_module.startswith(DUMMY_MODULES_FOLDER) or none_module.startswith( TRANSFORMERS_DUMMY_MODULES_FOLDER ) if is_dummy_path and "dummy" in none_module: # call class_obj for nice error message of missing requirements class_obj() raise ValueError( f"The component {class_obj} of {pipeline_class} cannot be loaded as it does not seem to have" f" any of the loading methods defined in {ALL_IMPORTABLE_CLASSES}." ) load_method = getattr(class_obj, load_method_name) # add kwargs to loading method diffusers_module = importlib.import_module(__name__.split(".")[0]) loading_kwargs = {} if issubclass(class_obj, torch.nn.Module): loading_kwargs["torch_dtype"] = torch_dtype if issubclass(class_obj, diffusers_module.OnnxRuntimeModel): loading_kwargs["provider"] = provider loading_kwargs["sess_options"] = sess_options is_diffusers_model = issubclass(class_obj, diffusers_module.ModelMixin) if is_transformers_available(): transformers_version = version.parse(version.parse(transformers.__version__).base_version) else: transformers_version = "N/A" is_transformers_model = ( is_transformers_available() and issubclass(class_obj, PreTrainedModel) and transformers_version >= version.parse("4.20.0") ) # When loading a transformers model, if the device_map is None, the weights will be initialized as opposed to diffusers. # To make default loading faster we set the `low_cpu_mem_usage=low_cpu_mem_usage` flag which is `True` by default. # This makes sure that the weights won't be initialized which significantly speeds up loading. 
if is_diffusers_model or is_transformers_model: loading_kwargs["device_map"] = device_map loading_kwargs["max_memory"] = max_memory loading_kwargs["offload_folder"] = offload_folder loading_kwargs["offload_state_dict"] = offload_state_dict loading_kwargs["variant"] = model_variants.pop(name, None) if from_flax: loading_kwargs["from_flax"] = True # the following can be deleted once the minimum required `transformers` version # is higher than 4.27 if ( is_transformers_model and loading_kwargs["variant"] is not None and transformers_version < version.parse("4.27.0") ): raise ImportError( f"When passing `variant='{variant}'`, please make sure to upgrade your `transformers` version to at least 4.27.0.dev0" ) elif is_transformers_model and loading_kwargs["variant"] is None: loading_kwargs.pop("variant") # if `from_flax` and model is transformer model, can currently not load with `low_cpu_mem_usage` if not (from_flax and is_transformers_model): loading_kwargs["low_cpu_mem_usage"] = low_cpu_mem_usage else: loading_kwargs["low_cpu_mem_usage"] = False # check if the module is in a subdirectory if os.path.isdir(os.path.join(cached_folder, name)): loaded_sub_model = load_method(os.path.join(cached_folder, name), **loading_kwargs) else: # else load from the root directory loaded_sub_model = load_method(cached_folder, **loading_kwargs) if isinstance(loaded_sub_model, torch.nn.Module) and isinstance(device_map, dict): # remove hooks remove_hook_from_module(loaded_sub_model, recurse=True) needs_offloading_to_cpu = device_map[""] == "cpu" if needs_offloading_to_cpu: dispatch_model( loaded_sub_model, state_dict=loaded_sub_model.state_dict(), device_map=device_map, force_hooks=True, main_device=0, ) else: dispatch_model(loaded_sub_model, device_map=device_map, force_hooks=True) return loaded_sub_model def _fetch_class_library_tuple(module): # import it here to avoid circular import diffusers_module = importlib.import_module(__name__.split(".")[0]) pipelines = getattr(diffusers_module, "pipelines") # register the config from the original module, not the dynamo compiled one not_compiled_module = _unwrap_model(module) library = not_compiled_module.__module__.split(".")[0] # check if the module is a pipeline module module_path_items = not_compiled_module.__module__.split(".") pipeline_dir = module_path_items[-2] if len(module_path_items) > 2 else None path = not_compiled_module.__module__.split(".") is_pipeline_module = pipeline_dir in path and hasattr(pipelines, pipeline_dir) # if library is not in LOADABLE_CLASSES, then it is a custom module. # Or if it's a pipeline module, then the module is inside the pipeline # folder so we set the library to module name. 
if is_pipeline_module: library = pipeline_dir elif library not in LOADABLE_CLASSES: library = not_compiled_module.__module__ # retrieve class_name class_name = not_compiled_module.__class__.__name__ return (library, class_name) def _identify_model_variants(folder: str, variant: str, config: dict) -> dict: model_variants = {} if variant is not None: for sub_folder in os.listdir(folder): folder_path = os.path.join(folder, sub_folder) is_folder = os.path.isdir(folder_path) and sub_folder in config variant_exists = is_folder and any(p.split(".")[1].startswith(variant) for p in os.listdir(folder_path)) if variant_exists: model_variants[sub_folder] = variant return model_variants def _resolve_custom_pipeline_and_cls(folder, config, custom_pipeline): custom_class_name = None if os.path.isfile(os.path.join(folder, f"{custom_pipeline}.py")): custom_pipeline = os.path.join(folder, f"{custom_pipeline}.py") elif isinstance(config["_class_name"], (list, tuple)) and os.path.isfile( os.path.join(folder, f"{config['_class_name'][0]}.py") ): custom_pipeline = os.path.join(folder, f"{config['_class_name'][0]}.py") custom_class_name = config["_class_name"][1] return custom_pipeline, custom_class_name def _maybe_raise_warning_for_inpainting(pipeline_class, pretrained_model_name_or_path: str, config: dict): if pipeline_class.__name__ == "StableDiffusionInpaintPipeline" and version.parse( version.parse(config["_diffusers_version"]).base_version ) <= version.parse("0.5.1"): from diffusers import StableDiffusionInpaintPipeline, StableDiffusionInpaintPipelineLegacy pipeline_class = StableDiffusionInpaintPipelineLegacy deprecation_message = ( "You are using a legacy checkpoint for inpainting with Stable Diffusion, therefore we are loading the" f" {StableDiffusionInpaintPipelineLegacy} class instead of {StableDiffusionInpaintPipeline}. For" " better inpainting results, we strongly suggest using Stable Diffusion's official inpainting" " checkpoint: https://huggingface.co/runwayml/stable-diffusion-inpainting instead or adapting your" f" checkpoint {pretrained_model_name_or_path} to the format of" " https://huggingface.co/runwayml/stable-diffusion-inpainting. Note that we do not actively maintain" " the {StableDiffusionInpaintPipelineLegacy} class and will likely remove it in version 1.0.0." 
) deprecate("StableDiffusionInpaintPipelineLegacy", "1.0.0", deprecation_message, standard_warn=False) def _update_init_kwargs_with_connected_pipeline( init_kwargs: dict, passed_pipe_kwargs: dict, passed_class_objs: dict, folder: str, **pipeline_loading_kwargs ) -> dict: from .pipeline_utils import DiffusionPipeline modelcard = ModelCard.load(os.path.join(folder, "README.md")) connected_pipes = {prefix: getattr(modelcard.data, prefix, [None])[0] for prefix in CONNECTED_PIPES_KEYS} # We don't scheduler argument to match the existing logic: # https://github.com/huggingface/diffusers/blob/867e0c919e1aa7ef8b03c8eb1460f4f875a683ae/src/diffusers/pipelines/pipeline_utils.py#L906C13-L925C14 pipeline_loading_kwargs_cp = pipeline_loading_kwargs.copy() if pipeline_loading_kwargs_cp is not None and len(pipeline_loading_kwargs_cp) >= 1: for k in pipeline_loading_kwargs: if "scheduler" in k: _ = pipeline_loading_kwargs_cp.pop(k) def get_connected_passed_kwargs(prefix): connected_passed_class_obj = { k.replace(f"{prefix}_", ""): w for k, w in passed_class_objs.items() if k.split("_")[0] == prefix } connected_passed_pipe_kwargs = { k.replace(f"{prefix}_", ""): w for k, w in passed_pipe_kwargs.items() if k.split("_")[0] == prefix } connected_passed_kwargs = {**connected_passed_class_obj, **connected_passed_pipe_kwargs} return connected_passed_kwargs connected_pipes = { prefix: DiffusionPipeline.from_pretrained( repo_id, **pipeline_loading_kwargs_cp, **get_connected_passed_kwargs(prefix) ) for prefix, repo_id in connected_pipes.items() if repo_id is not None } for prefix, connected_pipe in connected_pipes.items(): # add connected pipes to `init_kwargs` with <prefix>_<component_name>, e.g. "prior_text_encoder" init_kwargs.update( {"_".join([prefix, name]): component for name, component in connected_pipe.components.items()} ) return init_kwargs
diffusers/src/diffusers/pipelines/pipeline_loading_utils.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/pipeline_loading_utils.py", "repo_id": "diffusers", "token_count": 13794 }
147
from dataclasses import dataclass from enum import Enum from typing import TYPE_CHECKING, List, Optional, Union import numpy as np import PIL from PIL import Image from ...utils import ( DIFFUSERS_SLOW_IMPORT, BaseOutput, OptionalDependencyNotAvailable, _LazyModule, get_objects_from_module, is_torch_available, is_transformers_available, ) @dataclass class SafetyConfig(object): WEAK = { "sld_warmup_steps": 15, "sld_guidance_scale": 20, "sld_threshold": 0.0, "sld_momentum_scale": 0.0, "sld_mom_beta": 0.0, } MEDIUM = { "sld_warmup_steps": 10, "sld_guidance_scale": 1000, "sld_threshold": 0.01, "sld_momentum_scale": 0.3, "sld_mom_beta": 0.4, } STRONG = { "sld_warmup_steps": 7, "sld_guidance_scale": 2000, "sld_threshold": 0.025, "sld_momentum_scale": 0.5, "sld_mom_beta": 0.7, } MAX = { "sld_warmup_steps": 0, "sld_guidance_scale": 5000, "sld_threshold": 1.0, "sld_momentum_scale": 0.5, "sld_mom_beta": 0.7, } _dummy_objects = {} _additional_imports = {} _import_structure = {} _additional_imports.update({"SafetyConfig": SafetyConfig}) try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils import dummy_torch_and_transformers_objects _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: _import_structure.update( { "pipeline_output": ["StableDiffusionSafePipelineOutput"], "pipeline_stable_diffusion_safe": ["StableDiffusionPipelineSafe"], "safety_checker": ["StableDiffusionSafetyChecker"], } ) if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * else: from .pipeline_output import StableDiffusionSafePipelineOutput from .pipeline_stable_diffusion_safe import StableDiffusionPipelineSafe from .safety_checker import SafeStableDiffusionSafetyChecker else: import sys sys.modules[__name__] = _LazyModule( __name__, globals()["__file__"], _import_structure, module_spec=__spec__, ) for name, value in _dummy_objects.items(): setattr(sys.modules[__name__], name, value) for name, value in _additional_imports.items(): setattr(sys.modules[__name__], name, value)
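# --- Illustrative sketch (not part of the original file) ---
# The SafetyConfig presets above are plain dicts of Safe Latent Diffusion (`sld_*`)
# keyword arguments meant to be unpacked into the safe pipeline call. The checkpoint id
# and prompt below are placeholders for illustration only.
def _example_safety_presets():
    import torch
    from diffusers import StableDiffusionPipelineSafe
    from diffusers.pipelines.stable_diffusion_safe import SafetyConfig

    pipe = StableDiffusionPipelineSafe.from_pretrained(
        "AIML-TUDA/stable-diffusion-safe", torch_dtype=torch.float16  # placeholder checkpoint id
    ).to("cuda")
    # Stronger presets suppress unsafe concepts more aggressively, at some cost to prompt fidelity.
    image = pipe(prompt="a portrait photo", **SafetyConfig.MEDIUM).images[0]
    return image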
diffusers/src/diffusers/pipelines/stable_diffusion_safe/__init__.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/stable_diffusion_safe/__init__.py", "repo_id": "diffusers", "token_count": 1237 }
148
from typing import TYPE_CHECKING from ...utils import ( DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, get_objects_from_module, is_torch_available, is_transformers_available, ) _dummy_objects = {} _import_structure = {} try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils import dummy_torch_and_transformers_objects # noqa F403 _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: _import_structure["pipeline_stable_diffusion_adapter"] = ["StableDiffusionAdapterPipeline"] _import_structure["pipeline_stable_diffusion_xl_adapter"] = ["StableDiffusionXLAdapterPipeline"] if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipeline_stable_diffusion_adapter import StableDiffusionAdapterPipeline from .pipeline_stable_diffusion_xl_adapter import StableDiffusionXLAdapterPipeline else: import sys sys.modules[__name__] = _LazyModule( __name__, globals()["__file__"], _import_structure, module_spec=__spec__, ) for name, value in _dummy_objects.items(): setattr(sys.modules[__name__], name, value)
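# --- Illustrative sketch (not part of the original file) ---
# With the _LazyModule pattern above, the adapter pipelines are only imported when they
# are first accessed, which keeps `import diffusers` cheap. A hypothetical access:
def _example_lazy_access():
    from diffusers.pipelines import t2i_adapter

    # Attribute access triggers the lazy import of pipeline_stable_diffusion_adapter.
    return t2i_adapter.StableDiffusionAdapterPipeline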
diffusers/src/diffusers/pipelines/t2i_adapter/__init__.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/t2i_adapter/__init__.py", "repo_id": "diffusers", "token_count": 602 }
149
import inspect from dataclasses import dataclass from typing import Callable, List, Optional, Union import numpy as np import PIL.Image import torch from transformers import ( CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection, GPT2Tokenizer, ) from ...image_processor import VaeImageProcessor from ...loaders import StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin from ...models import AutoencoderKL from ...models.lora import adjust_lora_scale_text_encoder from ...schedulers import KarrasDiffusionSchedulers from ...utils import USE_PEFT_BACKEND, deprecate, logging, scale_lora_layers, unscale_lora_layers from ...utils.outputs import BaseOutput from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline from .modeling_text_decoder import UniDiffuserTextDecoder from .modeling_uvit import UniDiffuserModel logger = logging.get_logger(__name__) # pylint: disable=invalid-name # New BaseOutput child class for joint image-text output @dataclass class ImageTextPipelineOutput(BaseOutput): """ Output class for joint image-text pipelines. Args: images (`List[PIL.Image.Image]` or `np.ndarray`): List of denoised PIL images of length `batch_size` or NumPy array of shape `(batch_size, height, width, num_channels)`. text (`List[str]` or `List[List[str]]`): List of generated text strings of length `batch_size` or a list of lists of strings whose outer list has length `batch_size`. """ images: Optional[Union[List[PIL.Image.Image], np.ndarray]] text: Optional[Union[List[str], List[List[str]]]] class UniDiffuserPipeline(DiffusionPipeline): r""" Pipeline for a bimodal image-text model which supports unconditional text and image generation, text-conditioned image generation, image-conditioned text generation, and joint image-text generation. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.). Args: vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. This is part of the UniDiffuser image representation along with the CLIP vision encoding. text_encoder ([`CLIPTextModel`]): Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). image_encoder ([`CLIPVisionModel`]): A [`~transformers.CLIPVisionModel`] to encode images as part of its image representation along with the VAE latent representation. image_processor ([`CLIPImageProcessor`]): [`~transformers.CLIPImageProcessor`] to preprocess an image before CLIP encoding it with `image_encoder`. clip_tokenizer ([`CLIPTokenizer`]): A [`~transformers.CLIPTokenizer`] to tokenize the prompt before encoding it with `text_encoder`. text_decoder ([`UniDiffuserTextDecoder`]): Frozen text decoder. This is a GPT-style model which is used to generate text from the UniDiffuser embedding. text_tokenizer ([`GPT2Tokenizer`]): A [`~transformers.GPT2Tokenizer`] to decode text for text generation; used along with the `text_decoder`. unet ([`UniDiffuserModel`]): A [U-ViT](https://github.com/baofff/U-ViT) model with U-Net-style skip connections between transformer layers to denoise the encoded image latents. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded image and/or text latents. The original UniDiffuser paper uses the [`DPMSolverMultistepScheduler`] scheduler.
""" # TODO: support for moving submodules for components with enable_model_cpu_offload model_cpu_offload_seq = "text_encoder->image_encoder->unet->vae->text_decoder" def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, image_encoder: CLIPVisionModelWithProjection, clip_image_processor: CLIPImageProcessor, clip_tokenizer: CLIPTokenizer, text_decoder: UniDiffuserTextDecoder, text_tokenizer: GPT2Tokenizer, unet: UniDiffuserModel, scheduler: KarrasDiffusionSchedulers, ): super().__init__() if text_encoder.config.hidden_size != text_decoder.prefix_inner_dim: raise ValueError( f"The text encoder hidden size and text decoder prefix inner dim must be the same, but" f" `text_encoder.config.hidden_size`: {text_encoder.config.hidden_size} and `text_decoder.prefix_inner_dim`: {text_decoder.prefix_inner_dim}" ) self.register_modules( vae=vae, text_encoder=text_encoder, image_encoder=image_encoder, clip_image_processor=clip_image_processor, clip_tokenizer=clip_tokenizer, text_decoder=text_decoder, text_tokenizer=text_tokenizer, unet=unet, scheduler=scheduler, ) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) self.num_channels_latents = vae.config.latent_channels self.text_encoder_seq_len = text_encoder.config.max_position_embeddings self.text_encoder_hidden_size = text_encoder.config.hidden_size self.image_encoder_projection_dim = image_encoder.config.projection_dim self.unet_resolution = unet.config.sample_size self.text_intermediate_dim = self.text_encoder_hidden_size if self.text_decoder.prefix_hidden_dim is not None: self.text_intermediate_dim = self.text_decoder.prefix_hidden_dim self.mode = None # TODO: handle safety checking? self.safety_checker = None # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs def prepare_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs def _infer_mode(self, prompt, prompt_embeds, image, latents, prompt_latents, vae_latents, clip_latents): r""" Infer the generation task ('mode') from the inputs to `__call__`. If the mode has been manually set, the set mode will be used. 
""" prompt_available = (prompt is not None) or (prompt_embeds is not None) image_available = image is not None input_available = prompt_available or image_available prompt_latents_available = prompt_latents is not None vae_latents_available = vae_latents is not None clip_latents_available = clip_latents is not None full_latents_available = latents is not None image_latents_available = vae_latents_available and clip_latents_available all_indv_latents_available = prompt_latents_available and image_latents_available if self.mode is not None: # Preferentially use the mode set by the user mode = self.mode elif prompt_available: mode = "text2img" elif image_available: mode = "img2text" else: # Neither prompt nor image supplied, infer based on availability of latents if full_latents_available or all_indv_latents_available: mode = "joint" elif prompt_latents_available: mode = "text" elif image_latents_available: mode = "img" else: # No inputs or latents available mode = "joint" # Give warnings for ambiguous cases if self.mode is None and prompt_available and image_available: logger.warning( f"You have supplied both a text prompt and image to the pipeline and mode has not been set manually," f" defaulting to mode '{mode}'." ) if self.mode is None and not input_available: if vae_latents_available != clip_latents_available: # Exactly one of vae_latents and clip_latents is supplied logger.warning( f"You have supplied exactly one of `vae_latents` and `clip_latents`, whereas either both or none" f" are expected to be supplied. Defaulting to mode '{mode}'." ) elif not prompt_latents_available and not vae_latents_available and not clip_latents_available: # No inputs or latents supplied logger.warning( f"No inputs or latents have been supplied, and mode has not been manually set," f" defaulting to mode '{mode}'." ) return mode # Copied from diffusers.pipelines.pipeline_utils.StableDiffusionMixin.enable_vae_slicing def enable_vae_slicing(self): r""" Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several steps. This is useful to save some memory and allow larger batch sizes. """ self.vae.enable_slicing() # Copied from diffusers.pipelines.pipeline_utils.StableDiffusionMixin.disable_vae_slicing def disable_vae_slicing(self): r""" Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to computing decoding in one step. """ self.vae.disable_slicing() # Copied from diffusers.pipelines.pipeline_utils.StableDiffusionMixin.enable_vae_tiling def enable_vae_tiling(self): r""" Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow processing larger images. """ self.vae.enable_tiling() # Copied from diffusers.pipelines.pipeline_utils.StableDiffusionMixin.disable_vae_tiling def disable_vae_tiling(self): r""" Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to computing decoding in one step. 
""" self.vae.disable_tiling() # Functions to manually set the mode def set_text_mode(self): r"""Manually set the generation mode to unconditional ("marginal") text generation.""" self.mode = "text" def set_image_mode(self): r"""Manually set the generation mode to unconditional ("marginal") image generation.""" self.mode = "img" def set_text_to_image_mode(self): r"""Manually set the generation mode to text-conditioned image generation.""" self.mode = "text2img" def set_image_to_text_mode(self): r"""Manually set the generation mode to image-conditioned text generation.""" self.mode = "img2text" def set_joint_mode(self): r"""Manually set the generation mode to unconditional joint image-text generation.""" self.mode = "joint" def reset_mode(self): r"""Removes a manually set mode; after calling this, the pipeline will infer the mode from inputs.""" self.mode = None def _infer_batch_size( self, mode, prompt, prompt_embeds, image, num_images_per_prompt, num_prompts_per_image, latents, prompt_latents, vae_latents, clip_latents, ): r"""Infers the batch size and multiplier depending on mode and supplied arguments to `__call__`.""" if num_images_per_prompt is None: num_images_per_prompt = 1 if num_prompts_per_image is None: num_prompts_per_image = 1 assert num_images_per_prompt > 0, "num_images_per_prompt must be a positive integer" assert num_prompts_per_image > 0, "num_prompts_per_image must be a positive integer" if mode in ["text2img"]: if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: # Either prompt or prompt_embeds must be present for text2img. batch_size = prompt_embeds.shape[0] multiplier = num_images_per_prompt elif mode in ["img2text"]: if isinstance(image, PIL.Image.Image): batch_size = 1 else: # Image must be available and type either PIL.Image.Image or torch.Tensor. # Not currently supporting something like image_embeds. batch_size = image.shape[0] multiplier = num_prompts_per_image elif mode in ["img"]: if vae_latents is not None: batch_size = vae_latents.shape[0] elif clip_latents is not None: batch_size = clip_latents.shape[0] else: batch_size = 1 multiplier = num_images_per_prompt elif mode in ["text"]: if prompt_latents is not None: batch_size = prompt_latents.shape[0] else: batch_size = 1 multiplier = num_prompts_per_image elif mode in ["joint"]: if latents is not None: batch_size = latents.shape[0] elif prompt_latents is not None: batch_size = prompt_latents.shape[0] elif vae_latents is not None: batch_size = vae_latents.shape[0] elif clip_latents is not None: batch_size = clip_latents.shape[0] else: batch_size = 1 if num_images_per_prompt == num_prompts_per_image: multiplier = num_images_per_prompt else: multiplier = min(num_images_per_prompt, num_prompts_per_image) logger.warning( f"You are using mode `{mode}` and `num_images_per_prompt`: {num_images_per_prompt} and" f" num_prompts_per_image: {num_prompts_per_image} are not equal. Using batch size equal to" f" `min(num_images_per_prompt, num_prompts_per_image) = {batch_size}." 
) return batch_size, multiplier # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt def _encode_prompt( self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, lora_scale: Optional[float] = None, **kwargs, ): deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple." deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False) prompt_embeds_tuple = self.encode_prompt( prompt=prompt, device=device, num_images_per_prompt=num_images_per_prompt, do_classifier_free_guidance=do_classifier_free_guidance, negative_prompt=negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, lora_scale=lora_scale, **kwargs, ) # concatenate for backwards comp prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) return prompt_embeds # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt with self.tokenizer->self.clip_tokenizer def encode_prompt( self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, lora_scale: Optional[float] = None, clip_skip: Optional[int] = None, ): r""" Encodes the prompt into text encoder hidden states. Args: prompt (`str` or `List[str]`, *optional*): prompt to be encoded device: (`torch.device`): torch device num_images_per_prompt (`int`): number of images that should be generated per prompt do_classifier_free_guidance (`bool`): whether to use classifier free guidance or not negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. lora_scale (`float`, *optional*): A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. clip_skip (`int`, *optional*): Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that the output of the pre-final layer will be used for computing the prompt embeddings. 
""" # set lora scale so that monkey patched LoRA # function of text encoder can correctly access it if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin): self._lora_scale = lora_scale # dynamically adjust the LoRA scale if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) else: scale_lora_layers(self.text_encoder, lora_scale) if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: # textual inversion: process multi-vector tokens if necessary if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, self.clip_tokenizer) text_inputs = self.clip_tokenizer( prompt, padding="max_length", max_length=self.clip_tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = self.clip_tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( text_input_ids, untruncated_ids ): removed_text = self.clip_tokenizer.batch_decode( untruncated_ids[:, self.clip_tokenizer.model_max_length - 1 : -1] ) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {self.clip_tokenizer.model_max_length} tokens: {removed_text}" ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None if clip_skip is None: prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) prompt_embeds = prompt_embeds[0] else: prompt_embeds = self.text_encoder( text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True ) # Access the `hidden_states` first, that contains a tuple of # all the hidden states from the encoder layers. Then index into # the tuple to access the hidden states from the desired layer. prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] # We also need to apply the final LayerNorm here to not mess with the # representations. The `last_hidden_states` that we typically use for # obtaining the final prompt representations passes through the LayerNorm # layer. prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) if self.text_encoder is not None: prompt_embeds_dtype = self.text_encoder.dtype elif self.unet is not None: prompt_embeds_dtype = self.unet.dtype else: prompt_embeds_dtype = prompt_embeds.dtype prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) bs_embed, seq_len, _ = prompt_embeds.shape # duplicate text embeddings for each generation per prompt, using mps friendly method prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [""] * batch_size elif prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." 
) elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) else: uncond_tokens = negative_prompt # textual inversion: process multi-vector tokens if necessary if isinstance(self, TextualInversionLoaderMixin): uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.clip_tokenizer) max_length = prompt_embeds.shape[1] uncond_input = self.clip_tokenizer( uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None negative_prompt_embeds = self.text_encoder( uncond_input.input_ids.to(device), attention_mask=attention_mask, ) negative_prompt_embeds = negative_prompt_embeds[0] if do_classifier_free_guidance: # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) if self.text_encoder is not None: if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND: # Retrieve the original scale by scaling back the LoRA layers unscale_lora_layers(self.text_encoder, lora_scale) return prompt_embeds, negative_prompt_embeds # Modified from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_instruct_pix2pix.StableDiffusionInstructPix2PixPipeline.prepare_image_latents # Add num_prompts_per_image argument, sample from autoencoder moment distribution def encode_image_vae_latents( self, image, batch_size, num_prompts_per_image, dtype, device, do_classifier_free_guidance, generator=None, ): if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)): raise ValueError( f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}" ) image = image.to(device=device, dtype=dtype) batch_size = batch_size * num_prompts_per_image if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." ) if isinstance(generator, list): image_latents = [ self.vae.encode(image[i : i + 1]).latent_dist.sample(generator=generator[i]) * self.vae.config.scaling_factor for i in range(batch_size) ] image_latents = torch.cat(image_latents, dim=0) else: image_latents = self.vae.encode(image).latent_dist.sample(generator=generator) # Scale image_latents by the VAE's scaling factor image_latents = image_latents * self.vae.config.scaling_factor if batch_size > image_latents.shape[0] and batch_size % image_latents.shape[0] == 0: # expand image_latents for batch_size deprecation_message = ( f"You have passed {batch_size} text prompts (`prompt`), but only {image_latents.shape[0]} initial" " images (`image`). Initial images are now duplicating to match the number of text prompts. 
Note" " that this behavior is deprecated and will be removed in a version 1.0.0. Please make sure to update" " your script to pass as many initial images as text prompts to suppress this warning." ) deprecate("len(prompt) != len(image)", "1.0.0", deprecation_message, standard_warn=False) additional_image_per_prompt = batch_size // image_latents.shape[0] image_latents = torch.cat([image_latents] * additional_image_per_prompt, dim=0) elif batch_size > image_latents.shape[0] and batch_size % image_latents.shape[0] != 0: raise ValueError( f"Cannot duplicate `image` of batch size {image_latents.shape[0]} to {batch_size} text prompts." ) else: image_latents = torch.cat([image_latents], dim=0) if do_classifier_free_guidance: uncond_image_latents = torch.zeros_like(image_latents) image_latents = torch.cat([image_latents, image_latents, uncond_image_latents], dim=0) return image_latents def encode_image_clip_latents( self, image, batch_size, num_prompts_per_image, dtype, device, generator=None, ): # Map image to CLIP embedding. if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)): raise ValueError( f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}" ) preprocessed_image = self.clip_image_processor.preprocess( image, return_tensors="pt", ) preprocessed_image = preprocessed_image.to(device=device, dtype=dtype) batch_size = batch_size * num_prompts_per_image if isinstance(generator, list): image_latents = [ self.image_encoder(**preprocessed_image[i : i + 1]).image_embeds for i in range(batch_size) ] image_latents = torch.cat(image_latents, dim=0) else: image_latents = self.image_encoder(**preprocessed_image).image_embeds if batch_size > image_latents.shape[0] and batch_size % image_latents.shape[0] == 0: # expand image_latents for batch_size deprecation_message = ( f"You have passed {batch_size} text prompts (`prompt`), but only {image_latents.shape[0]} initial" " images (`image`). Initial images are now duplicating to match the number of text prompts. Note" " that this behavior is deprecated and will be removed in a version 1.0.0. Please make sure to update" " your script to pass as many initial images as text prompts to suppress this warning." ) deprecate("len(prompt) != len(image)", "1.0.0", deprecation_message, standard_warn=False) additional_image_per_prompt = batch_size // image_latents.shape[0] image_latents = torch.cat([image_latents] * additional_image_per_prompt, dim=0) elif batch_size > image_latents.shape[0] and batch_size % image_latents.shape[0] != 0: raise ValueError( f"Cannot duplicate `image` of batch size {image_latents.shape[0]} to {batch_size} text prompts." ) else: image_latents = torch.cat([image_latents], dim=0) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." ) return image_latents def prepare_text_latents( self, batch_size, num_images_per_prompt, seq_len, hidden_size, dtype, device, generator, latents=None ): # Prepare latents for the CLIP embedded prompt. shape = (batch_size * num_images_per_prompt, seq_len, hidden_size) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
) if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: # latents is assumed to have shape (B, L, D) latents = latents.repeat(num_images_per_prompt, 1, 1) latents = latents.to(device=device, dtype=dtype) # scale the initial noise by the standard deviation required by the scheduler latents = latents * self.scheduler.init_noise_sigma return latents # Modified from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents # Rename prepare_latents -> prepare_image_vae_latents and add num_prompts_per_image argument. def prepare_image_vae_latents( self, batch_size, num_prompts_per_image, num_channels_latents, height, width, dtype, device, generator, latents=None, ): shape = ( batch_size * num_prompts_per_image, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor, ) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." ) if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: # latents is assumed to have shape (B, C, H, W) latents = latents.repeat(num_prompts_per_image, 1, 1, 1) latents = latents.to(device=device, dtype=dtype) # scale the initial noise by the standard deviation required by the scheduler latents = latents * self.scheduler.init_noise_sigma return latents def prepare_image_clip_latents( self, batch_size, num_prompts_per_image, clip_img_dim, dtype, device, generator, latents=None ): # Prepare latents for the CLIP embedded image. shape = (batch_size * num_prompts_per_image, 1, clip_img_dim) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
) if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: # latents is assumed to have shape (B, L, D) latents = latents.repeat(num_prompts_per_image, 1, 1) latents = latents.to(device=device, dtype=dtype) # scale the initial noise by the standard deviation required by the scheduler latents = latents * self.scheduler.init_noise_sigma return latents def decode_text_latents(self, text_latents, device): output_token_list, seq_lengths = self.text_decoder.generate_captions( text_latents, self.text_tokenizer.eos_token_id, device=device ) output_list = output_token_list.cpu().numpy() generated_text = [ self.text_tokenizer.decode(output[: int(length)], skip_special_tokens=True) for output, length in zip(output_list, seq_lengths) ] return generated_text def _split(self, x, height, width): r""" Splits a flattened embedding x of shape (B, C * H * W + clip_img_dim) into two tensors of shape (B, C, H, W) and (B, 1, clip_img_dim) """ batch_size = x.shape[0] latent_height = height // self.vae_scale_factor latent_width = width // self.vae_scale_factor img_vae_dim = self.num_channels_latents * latent_height * latent_width img_vae, img_clip = x.split([img_vae_dim, self.image_encoder_projection_dim], dim=1) img_vae = torch.reshape(img_vae, (batch_size, self.num_channels_latents, latent_height, latent_width)) img_clip = torch.reshape(img_clip, (batch_size, 1, self.image_encoder_projection_dim)) return img_vae, img_clip def _combine(self, img_vae, img_clip): r""" Combines a latent image img_vae of shape (B, C, H, W) and a CLIP-embedded image img_clip of shape (B, 1, clip_img_dim) into a single tensor of shape (B, C * H * W + clip_img_dim). """ img_vae = torch.reshape(img_vae, (img_vae.shape[0], -1)) img_clip = torch.reshape(img_clip, (img_clip.shape[0], -1)) return torch.concat([img_vae, img_clip], dim=-1) def _split_joint(self, x, height, width): r""" Splits a flattened embedding x of shape (B, C * H * W + clip_img_dim + text_seq_len * text_dim) into (img_vae, img_clip, text) where img_vae is of shape (B, C, H, W), img_clip is of shape (B, 1, clip_img_dim), and text is of shape (B, text_seq_len, text_dim). """ batch_size = x.shape[0] latent_height = height // self.vae_scale_factor latent_width = width // self.vae_scale_factor img_vae_dim = self.num_channels_latents * latent_height * latent_width text_dim = self.text_encoder_seq_len * self.text_intermediate_dim img_vae, img_clip, text = x.split([img_vae_dim, self.image_encoder_projection_dim, text_dim], dim=1) img_vae = torch.reshape(img_vae, (batch_size, self.num_channels_latents, latent_height, latent_width)) img_clip = torch.reshape(img_clip, (batch_size, 1, self.image_encoder_projection_dim)) text = torch.reshape(text, (batch_size, self.text_encoder_seq_len, self.text_intermediate_dim)) return img_vae, img_clip, text def _combine_joint(self, img_vae, img_clip, text): r""" Combines a latent image img_vae of shape (B, C, H, W), a CLIP-embedded image img_clip of shape (B, L_img, clip_img_dim), and a text embedding text of shape (B, L_text, text_dim) into a single embedding x of shape (B, C * H * W + L_img * clip_img_dim + L_text * text_dim). 
""" img_vae = torch.reshape(img_vae, (img_vae.shape[0], -1)) img_clip = torch.reshape(img_clip, (img_clip.shape[0], -1)) text = torch.reshape(text, (text.shape[0], -1)) return torch.concat([img_vae, img_clip, text], dim=-1) def _get_noise_pred( self, mode, latents, t, prompt_embeds, img_vae, img_clip, max_timestep, data_type, guidance_scale, generator, device, height, width, ): r""" Gets the noise prediction using the `unet` and performs classifier-free guidance, if necessary. """ if mode == "joint": # Joint text-image generation img_vae_latents, img_clip_latents, text_latents = self._split_joint(latents, height, width) img_vae_out, img_clip_out, text_out = self.unet( img_vae_latents, img_clip_latents, text_latents, timestep_img=t, timestep_text=t, data_type=data_type ) x_out = self._combine_joint(img_vae_out, img_clip_out, text_out) if guidance_scale <= 1.0: return x_out # Classifier-free guidance img_vae_T = randn_tensor(img_vae.shape, generator=generator, device=device, dtype=img_vae.dtype) img_clip_T = randn_tensor(img_clip.shape, generator=generator, device=device, dtype=img_clip.dtype) text_T = randn_tensor(prompt_embeds.shape, generator=generator, device=device, dtype=prompt_embeds.dtype) _, _, text_out_uncond = self.unet( img_vae_T, img_clip_T, text_latents, timestep_img=max_timestep, timestep_text=t, data_type=data_type ) img_vae_out_uncond, img_clip_out_uncond, _ = self.unet( img_vae_latents, img_clip_latents, text_T, timestep_img=t, timestep_text=max_timestep, data_type=data_type, ) x_out_uncond = self._combine_joint(img_vae_out_uncond, img_clip_out_uncond, text_out_uncond) return guidance_scale * x_out + (1.0 - guidance_scale) * x_out_uncond elif mode == "text2img": # Text-conditioned image generation img_vae_latents, img_clip_latents = self._split(latents, height, width) img_vae_out, img_clip_out, text_out = self.unet( img_vae_latents, img_clip_latents, prompt_embeds, timestep_img=t, timestep_text=0, data_type=data_type ) img_out = self._combine(img_vae_out, img_clip_out) if guidance_scale <= 1.0: return img_out # Classifier-free guidance text_T = randn_tensor(prompt_embeds.shape, generator=generator, device=device, dtype=prompt_embeds.dtype) img_vae_out_uncond, img_clip_out_uncond, text_out_uncond = self.unet( img_vae_latents, img_clip_latents, text_T, timestep_img=t, timestep_text=max_timestep, data_type=data_type, ) img_out_uncond = self._combine(img_vae_out_uncond, img_clip_out_uncond) return guidance_scale * img_out + (1.0 - guidance_scale) * img_out_uncond elif mode == "img2text": # Image-conditioned text generation img_vae_out, img_clip_out, text_out = self.unet( img_vae, img_clip, latents, timestep_img=0, timestep_text=t, data_type=data_type ) if guidance_scale <= 1.0: return text_out # Classifier-free guidance img_vae_T = randn_tensor(img_vae.shape, generator=generator, device=device, dtype=img_vae.dtype) img_clip_T = randn_tensor(img_clip.shape, generator=generator, device=device, dtype=img_clip.dtype) img_vae_out_uncond, img_clip_out_uncond, text_out_uncond = self.unet( img_vae_T, img_clip_T, latents, timestep_img=max_timestep, timestep_text=t, data_type=data_type ) return guidance_scale * text_out + (1.0 - guidance_scale) * text_out_uncond elif mode == "text": # Unconditional ("marginal") text generation (no CFG) img_vae_out, img_clip_out, text_out = self.unet( img_vae, img_clip, latents, timestep_img=max_timestep, timestep_text=t, data_type=data_type ) return text_out elif mode == "img": # Unconditional ("marginal") image generation (no CFG) img_vae_latents, 
img_clip_latents = self._split(latents, height, width) img_vae_out, img_clip_out, text_out = self.unet( img_vae_latents, img_clip_latents, prompt_embeds, timestep_img=t, timestep_text=max_timestep, data_type=data_type, ) img_out = self._combine(img_vae_out, img_clip_out) return img_out def check_latents_shape(self, latents_name, latents, expected_shape): latents_shape = latents.shape expected_num_dims = len(expected_shape) + 1 # expected dimensions plus the batch dimension expected_shape_str = ", ".join(str(dim) for dim in expected_shape) if len(latents_shape) != expected_num_dims: raise ValueError( f"`{latents_name}` should have shape (batch_size, {expected_shape_str}), but the current shape" f" {latents_shape} has {len(latents_shape)} dimensions." ) for i in range(1, expected_num_dims): if latents_shape[i] != expected_shape[i - 1]: raise ValueError( f"`{latents_name}` should have shape (batch_size, {expected_shape_str}), but the current shape" f" {latents_shape} has {latents_shape[i]} != {expected_shape[i - 1]} at dimension {i}." ) def check_inputs( self, mode, prompt, image, height, width, callback_steps, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None, latents=None, prompt_latents=None, vae_latents=None, clip_latents=None, ): # Check inputs before running the generative process. if height % self.vae_scale_factor != 0 or width % self.vae_scale_factor != 0: raise ValueError( f"`height` and `width` have to be divisible by {self.vae_scale_factor} but are {height} and {width}." ) if (callback_steps is None) or ( callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) ): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." ) if mode == "text2img": if prompt is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt is None and prompt_embeds is None: raise ValueError( "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." ) elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" f" {negative_prompt_embeds}. Please make sure to only forward one of the two." ) if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError( "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" f" {negative_prompt_embeds.shape}." 
) if mode == "img2text": if image is None: raise ValueError("`img2text` mode requires an image to be provided.") # Check provided latents latent_height = height // self.vae_scale_factor latent_width = width // self.vae_scale_factor full_latents_available = latents is not None prompt_latents_available = prompt_latents is not None vae_latents_available = vae_latents is not None clip_latents_available = clip_latents is not None if full_latents_available: individual_latents_available = ( prompt_latents is not None or vae_latents is not None or clip_latents is not None ) if individual_latents_available: logger.warning( "You have supplied both `latents` and at least one of `prompt_latents`, `vae_latents`, and" " `clip_latents`. The value of `latents` will override the value of any individually supplied latents." ) # Check shape of full latents img_vae_dim = self.num_channels_latents * latent_height * latent_width text_dim = self.text_encoder_seq_len * self.text_encoder_hidden_size latents_dim = img_vae_dim + self.image_encoder_projection_dim + text_dim latents_expected_shape = (latents_dim,) self.check_latents_shape("latents", latents, latents_expected_shape) # Check individual latent shapes, if present if prompt_latents_available: prompt_latents_expected_shape = (self.text_encoder_seq_len, self.text_encoder_hidden_size) self.check_latents_shape("prompt_latents", prompt_latents, prompt_latents_expected_shape) if vae_latents_available: vae_latents_expected_shape = (self.num_channels_latents, latent_height, latent_width) self.check_latents_shape("vae_latents", vae_latents, vae_latents_expected_shape) if clip_latents_available: clip_latents_expected_shape = (1, self.image_encoder_projection_dim) self.check_latents_shape("clip_latents", clip_latents, clip_latents_expected_shape) if mode in ["text2img", "img"] and vae_latents_available and clip_latents_available: if vae_latents.shape[0] != clip_latents.shape[0]: raise ValueError( f"Both `vae_latents` and `clip_latents` are supplied, but their batch dimensions are not equal:" f" {vae_latents.shape[0]} != {clip_latents.shape[0]}." ) if mode == "joint" and prompt_latents_available and vae_latents_available and clip_latents_available: if prompt_latents.shape[0] != vae_latents.shape[0] or prompt_latents.shape[0] != clip_latents.shape[0]: raise ValueError( f"All of `prompt_latents`, `vae_latents`, and `clip_latents` are supplied, but their batch" f" dimensions are not equal: {prompt_latents.shape[0]} != {vae_latents.shape[0]}" f" != {clip_latents.shape[0]}." 
) @torch.no_grad() def __call__( self, prompt: Optional[Union[str, List[str]]] = None, image: Optional[Union[torch.Tensor, PIL.Image.Image]] = None, height: Optional[int] = None, width: Optional[int] = None, data_type: Optional[int] = 1, num_inference_steps: int = 50, guidance_scale: float = 8.0, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, num_prompts_per_image: Optional[int] = 1, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.Tensor] = None, prompt_latents: Optional[torch.Tensor] = None, vae_latents: Optional[torch.Tensor] = None, clip_latents: Optional[torch.Tensor] = None, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, callback_steps: int = 1, ): r""" The call function to the pipeline for generation. Args: prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. Required for text-conditioned image generation (`text2img`) mode. image (`torch.Tensor` or `PIL.Image.Image`, *optional*): `Image` or tensor representing an image batch. Required for image-conditioned text generation (`img2text`) mode. height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): The height in pixels of the generated image. width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): The width in pixels of the generated image. data_type (`int`, *optional*, defaults to 1): The data type (either 0 or 1). Only used if you are loading a checkpoint which supports a data type embedding; this is added for compatibility with the [UniDiffuser-v1](https://huggingface.co/thu-ml/unidiffuser-v1) checkpoint. num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. guidance_scale (`float`, *optional*, defaults to 8.0): A higher guidance scale value encourages the model to generate images closely linked to the text `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide what to not include in image generation. If not defined, you need to pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). Used in text-conditioned image generation (`text2img`) mode. num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. Used in `text2img` (text-conditioned image generation) and `img` mode. If the mode is joint and both `num_images_per_prompt` and `num_prompts_per_image` are supplied, `min(num_images_per_prompt, num_prompts_per_image)` samples are generated. num_prompts_per_image (`int`, *optional*, defaults to 1): The number of prompts to generate per image. Used in `img2text` (image-conditioned text generation) and `text` mode. If the mode is joint and both `num_images_per_prompt` and `num_prompts_per_image` are supplied, `min(num_images_per_prompt, num_prompts_per_image)` samples are generated. eta (`float`, *optional*, defaults to 0.0): Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. 
Only applies to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.Tensor`, *optional*): Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for joint image-text generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor is generated by sampling using the supplied random `generator`. This assumes a full set of VAE, CLIP, and text latents; if supplied, it overrides the value of `prompt_latents`, `vae_latents`, and `clip_latents`. prompt_latents (`torch.Tensor`, *optional*): Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for text generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor is generated by sampling using the supplied random `generator`. vae_latents (`torch.Tensor`, *optional*): Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor is generated by sampling using the supplied random `generator`. clip_latents (`torch.Tensor`, *optional*): Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor is generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not provided, text embeddings are generated from the `prompt` input argument. Used in text-conditioned image generation (`text2img`) mode. negative_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. Used in text-conditioned image generation (`text2img`) mode. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generated image. Choose between `PIL.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.ImageTextPipelineOutput`] instead of a plain tuple. callback (`Callable`, *optional*): A function that is called every `callback_steps` steps during inference. The function is called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. callback_steps (`int`, *optional*, defaults to 1): The frequency at which the `callback` function is called. If not specified, the callback is called at every step. Returns: [`~pipelines.unidiffuser.ImageTextPipelineOutput`] or `tuple`: If `return_dict` is `True`, [`~pipelines.unidiffuser.ImageTextPipelineOutput`] is returned, otherwise a `tuple` is returned where the first element is a list with the generated images and the second element is a list of generated texts. """ # 0. Default height and width to unet height = height or self.unet_resolution * self.vae_scale_factor width = width or self.unet_resolution * self.vae_scale_factor # 1. Check inputs # Recalculate mode for each call to the pipeline. 
mode = self._infer_mode(prompt, prompt_embeds, image, latents, prompt_latents, vae_latents, clip_latents) self.check_inputs( mode, prompt, image, height, width, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds, latents, prompt_latents, vae_latents, clip_latents, ) # 2. Define call parameters batch_size, multiplier = self._infer_batch_size( mode, prompt, prompt_embeds, image, num_images_per_prompt, num_prompts_per_image, latents, prompt_latents, vae_latents, clip_latents, ) device = self._execution_device reduce_text_emb_dim = self.text_intermediate_dim < self.text_encoder_hidden_size or self.mode != "text2img" # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. # Note that this differs from the formulation in the unidiffusers paper! do_classifier_free_guidance = guidance_scale > 1.0 # check if scheduler is in sigmas space # scheduler_is_in_sigma_space = hasattr(self.scheduler, "sigmas") # 3. Encode input prompt, if available; otherwise prepare text latents if latents is not None: # Overwrite individual latents vae_latents, clip_latents, prompt_latents = self._split_joint(latents, height, width) if mode in ["text2img"]: # 3.1. Encode input prompt, if available assert prompt is not None or prompt_embeds is not None prompt_embeds, negative_prompt_embeds = self.encode_prompt( prompt=prompt, device=device, num_images_per_prompt=multiplier, do_classifier_free_guidance=do_classifier_free_guidance, negative_prompt=negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, ) # if do_classifier_free_guidance: # prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) else: # 3.2. Prepare text latent variables, if input not available prompt_embeds = self.prepare_text_latents( batch_size=batch_size, num_images_per_prompt=multiplier, seq_len=self.text_encoder_seq_len, hidden_size=self.text_encoder_hidden_size, dtype=self.text_encoder.dtype, # Should work with both full precision and mixed precision device=device, generator=generator, latents=prompt_latents, ) if reduce_text_emb_dim: prompt_embeds = self.text_decoder.encode(prompt_embeds) # 4. Encode image, if available; otherwise prepare image latents if mode in ["img2text"]: # 4.1. Encode images, if available assert image is not None, "`img2text` requires a conditioning image" # Encode image using VAE image_vae = self.image_processor.preprocess(image) height, width = image_vae.shape[-2:] image_vae_latents = self.encode_image_vae_latents( image=image_vae, batch_size=batch_size, num_prompts_per_image=multiplier, dtype=prompt_embeds.dtype, device=device, do_classifier_free_guidance=False, # Copied from InstructPix2Pix, don't use their version of CFG generator=generator, ) # Encode image using CLIP image_clip_latents = self.encode_image_clip_latents( image=image, batch_size=batch_size, num_prompts_per_image=multiplier, dtype=prompt_embeds.dtype, device=device, generator=generator, ) # (batch_size, clip_hidden_size) => (batch_size, 1, clip_hidden_size) image_clip_latents = image_clip_latents.unsqueeze(1) else: # 4.2. 
Prepare image latent variables, if input not available # Prepare image VAE latents in latent space image_vae_latents = self.prepare_image_vae_latents( batch_size=batch_size, num_prompts_per_image=multiplier, num_channels_latents=self.num_channels_latents, height=height, width=width, dtype=prompt_embeds.dtype, device=device, generator=generator, latents=vae_latents, ) # Prepare image CLIP latents image_clip_latents = self.prepare_image_clip_latents( batch_size=batch_size, num_prompts_per_image=multiplier, clip_img_dim=self.image_encoder_projection_dim, dtype=prompt_embeds.dtype, device=device, generator=generator, latents=clip_latents, ) # 5. Set timesteps self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps # max_timestep = timesteps[0] max_timestep = self.scheduler.config.num_train_timesteps # 6. Prepare latent variables if mode == "joint": latents = self._combine_joint(image_vae_latents, image_clip_latents, prompt_embeds) elif mode in ["text2img", "img"]: latents = self._combine(image_vae_latents, image_clip_latents) elif mode in ["img2text", "text"]: latents = prompt_embeds # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) logger.debug(f"Scheduler extra step kwargs: {extra_step_kwargs}") # 8. Denoising loop num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): # predict the noise residual # Also applies classifier-free guidance as described in the UniDiffuser paper noise_pred = self._get_noise_pred( mode, latents, t, prompt_embeds, image_vae_latents, image_clip_latents, max_timestep, data_type, guidance_scale, generator, device, height, width, ) # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample # call the callback, if provided if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, "order", 1) callback(step_idx, t, latents) # 9. Post-processing image = None text = None if mode == "joint": image_vae_latents, image_clip_latents, text_latents = self._split_joint(latents, height, width) if not output_type == "latent": # Map latent VAE image back to pixel space image = self.vae.decode(image_vae_latents / self.vae.config.scaling_factor, return_dict=False)[0] else: image = image_vae_latents text = self.decode_text_latents(text_latents, device) elif mode in ["text2img", "img"]: image_vae_latents, image_clip_latents = self._split(latents, height, width) if not output_type == "latent": # Map latent VAE image back to pixel space image = self.vae.decode(image_vae_latents / self.vae.config.scaling_factor, return_dict=False)[0] else: image = image_vae_latents elif mode in ["img2text", "text"]: text_latents = latents text = self.decode_text_latents(text_latents, device) self.maybe_free_model_hooks() # 10. 
Postprocess the image, if necessary if image is not None: do_denormalize = [True] * image.shape[0] image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) # Offload last model to CPU if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None: self.final_offload_hook.offload() if not return_dict: return (image, text) return ImageTextPipelineOutput(images=image, text=text)
diffusers/src/diffusers/pipelines/unidiffuser/pipeline_unidiffuser.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/unidiffuser/pipeline_unidiffuser.py", "repo_id": "diffusers", "token_count": 30997 }
150
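The UniDiffuser pipeline above infers its generation mode from the inputs it receives (joint, text2img, img2text, text, or img). The following is only a minimal usage sketch built from the `__call__` signature shown above and the `thu-ml/unidiffuser-v1` checkpoint referenced in its docstring; it assumes a CUDA device is available.

import torch
from diffusers import UniDiffuserPipeline

pipe = UniDiffuserPipeline.from_pretrained("thu-ml/unidiffuser-v1", torch_dtype=torch.float16)
pipe = pipe.to("cuda")

# Text-conditioned image generation: passing only `prompt` makes the mode inference pick "text2img".
sample = pipe(prompt="an elephant under the sea", num_inference_steps=20, guidance_scale=8.0)
image = sample.images[0]

# Joint generation: passing neither `prompt` nor `image` samples an (image, text) pair.
sample = pipe(num_inference_steps=20, guidance_scale=8.0)
image, text = sample.images[0], sample.text[0]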
import math from dataclasses import dataclass from typing import Optional, Tuple, Union import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from ..utils.torch_utils import randn_tensor from .scheduling_utils import SchedulerMixin # Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar def betas_for_alpha_bar( num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine", ): """ Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of (1-beta) over time from t = [0,1]. Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up to that part of the diffusion process. Args: num_diffusion_timesteps (`int`): the number of betas to produce. max_beta (`float`): the maximum beta to use; use values lower than 1 to prevent singularities. alpha_transform_type (`str`, *optional*, default to `cosine`): the type of noise schedule for alpha_bar. Choose from `cosine` or `exp` Returns: betas (`np.ndarray`): the betas used by the scheduler to step the model outputs """ if alpha_transform_type == "cosine": def alpha_bar_fn(t): return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(t): return math.exp(t * -12.0) else: raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}") betas = [] for i in range(num_diffusion_timesteps): t1 = i / num_diffusion_timesteps t2 = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta)) return torch.tensor(betas, dtype=torch.float32) @dataclass class ConsistencyDecoderSchedulerOutput(BaseOutput): """ Output class for the scheduler's `step` function. Args: prev_sample (`torch.Tensor` of shape `(batch_size, num_channels, height, width)` for images): Computed sample `(x_{t-1})` of previous timestep. `prev_sample` should be used as next model input in the denoising loop. 
""" prev_sample: torch.Tensor class ConsistencyDecoderScheduler(SchedulerMixin, ConfigMixin): order = 1 @register_to_config def __init__( self, num_train_timesteps: int = 1024, sigma_data: float = 0.5, ): betas = betas_for_alpha_bar(num_train_timesteps) alphas = 1.0 - betas alphas_cumprod = torch.cumprod(alphas, dim=0) self.sqrt_alphas_cumprod = torch.sqrt(alphas_cumprod) self.sqrt_one_minus_alphas_cumprod = torch.sqrt(1.0 - alphas_cumprod) sigmas = torch.sqrt(1.0 / alphas_cumprod - 1) sqrt_recip_alphas_cumprod = torch.sqrt(1.0 / alphas_cumprod) self.c_skip = sqrt_recip_alphas_cumprod * sigma_data**2 / (sigmas**2 + sigma_data**2) self.c_out = sigmas * sigma_data / (sigmas**2 + sigma_data**2) ** 0.5 self.c_in = sqrt_recip_alphas_cumprod / (sigmas**2 + sigma_data**2) ** 0.5 def set_timesteps( self, num_inference_steps: Optional[int] = None, device: Union[str, torch.device] = None, ): if num_inference_steps != 2: raise ValueError("Currently more than 2 inference steps are not supported.") self.timesteps = torch.tensor([1008, 512], dtype=torch.long, device=device) self.sqrt_alphas_cumprod = self.sqrt_alphas_cumprod.to(device) self.sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod.to(device) self.c_skip = self.c_skip.to(device) self.c_out = self.c_out.to(device) self.c_in = self.c_in.to(device) @property def init_noise_sigma(self): return self.sqrt_one_minus_alphas_cumprod[self.timesteps[0]] def scale_model_input(self, sample: torch.Tensor, timestep: Optional[int] = None) -> torch.Tensor: """ Ensures interchangeability with schedulers that need to scale the denoising model input depending on the current timestep. Args: sample (`torch.Tensor`): The input sample. timestep (`int`, *optional*): The current timestep in the diffusion chain. Returns: `torch.Tensor`: A scaled input sample. """ return sample * self.c_in[timestep] def step( self, model_output: torch.Tensor, timestep: Union[float, torch.Tensor], sample: torch.Tensor, generator: Optional[torch.Generator] = None, return_dict: bool = True, ) -> Union[ConsistencyDecoderSchedulerOutput, Tuple]: """ Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion process from the learned model outputs (most often the predicted noise). Args: model_output (`torch.Tensor`): The direct output from the learned diffusion model. timestep (`float`): The current timestep in the diffusion chain. sample (`torch.Tensor`): A current instance of a sample created by the diffusion process. generator (`torch.Generator`, *optional*): A random number generator. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~schedulers.scheduling_consistency_models.ConsistencyDecoderSchedulerOutput`] or `tuple`. Returns: [`~schedulers.scheduling_consistency_models.ConsistencyDecoderSchedulerOutput`] or `tuple`: If return_dict is `True`, [`~schedulers.scheduling_consistency_models.ConsistencyDecoderSchedulerOutput`] is returned, otherwise a tuple is returned where the first element is the sample tensor. 
""" x_0 = self.c_out[timestep] * model_output + self.c_skip[timestep] * sample timestep_idx = torch.where(self.timesteps == timestep)[0] if timestep_idx == len(self.timesteps) - 1: prev_sample = x_0 else: noise = randn_tensor(x_0.shape, generator=generator, dtype=x_0.dtype, device=x_0.device) prev_sample = ( self.sqrt_alphas_cumprod[self.timesteps[timestep_idx + 1]].to(x_0.dtype) * x_0 + self.sqrt_one_minus_alphas_cumprod[self.timesteps[timestep_idx + 1]].to(x_0.dtype) * noise ) if not return_dict: return (prev_sample,) return ConsistencyDecoderSchedulerOutput(prev_sample=prev_sample)
diffusers/src/diffusers/schedulers/scheduling_consistency_decoder.py/0
{ "file_path": "diffusers/src/diffusers/schedulers/scheduling_consistency_decoder.py", "repo_id": "diffusers", "token_count": 3023 }
151
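A rough sketch of how this two-step scheduler is driven follows; `decoder_model` is a hypothetical stand-in for whatever network produces `model_output` (in diffusers this scheduler is used by the consistency decoder VAE), so only the scheduler calls themselves are taken from the file above.

import torch
from diffusers.schedulers.scheduling_consistency_decoder import ConsistencyDecoderScheduler

scheduler = ConsistencyDecoderScheduler()
scheduler.set_timesteps(2)  # exactly 2 inference steps are supported

# Start from pure noise scaled by the scheduler's initial sigma.
sample = torch.randn(1, 3, 64, 64) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)
    model_output = decoder_model(model_input, t)  # hypothetical model call
    sample = scheduler.step(model_output, t, sample).prev_sample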
# Copyright 2024 Katherine Crowson and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import math import warnings from dataclasses import dataclass from typing import List, Optional, Tuple, Union import numpy as np import torch from scipy import integrate from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin @dataclass # Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->LMSDiscrete class LMSDiscreteSchedulerOutput(BaseOutput): """ Output class for the scheduler's `step` function output. Args: prev_sample (`torch.Tensor` of shape `(batch_size, num_channels, height, width)` for images): Computed sample `(x_{t-1})` of previous timestep. `prev_sample` should be used as next model input in the denoising loop. pred_original_sample (`torch.Tensor` of shape `(batch_size, num_channels, height, width)` for images): The predicted denoised sample `(x_{0})` based on the model output from the current timestep. `pred_original_sample` can be used to preview progress or for guidance. """ prev_sample: torch.Tensor pred_original_sample: Optional[torch.Tensor] = None # Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar def betas_for_alpha_bar( num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine", ): """ Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of (1-beta) over time from t = [0,1]. Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up to that part of the diffusion process. Args: num_diffusion_timesteps (`int`): the number of betas to produce. max_beta (`float`): the maximum beta to use; use values lower than 1 to prevent singularities. alpha_transform_type (`str`, *optional*, default to `cosine`): the type of noise schedule for alpha_bar. Choose from `cosine` or `exp` Returns: betas (`np.ndarray`): the betas used by the scheduler to step the model outputs """ if alpha_transform_type == "cosine": def alpha_bar_fn(t): return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(t): return math.exp(t * -12.0) else: raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}") betas = [] for i in range(num_diffusion_timesteps): t1 = i / num_diffusion_timesteps t2 = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta)) return torch.tensor(betas, dtype=torch.float32) class LMSDiscreteScheduler(SchedulerMixin, ConfigMixin): """ A linear multistep scheduler for discrete beta schedules. This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic methods the library implements for all schedulers such as loading and saving. Args: num_train_timesteps (`int`, defaults to 1000): The number of diffusion steps to train the model. 
beta_start (`float`, defaults to 0.0001): The starting `beta` value of inference. beta_end (`float`, defaults to 0.02): The final `beta` value. beta_schedule (`str`, defaults to `"linear"`): The beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from `linear` or `scaled_linear`. trained_betas (`np.ndarray`, *optional*): Pass an array of betas directly to the constructor to bypass `beta_start` and `beta_end`. use_karras_sigmas (`bool`, *optional*, defaults to `False`): Whether to use Karras sigmas for step sizes in the noise schedule during the sampling process. If `True`, the sigmas are determined according to a sequence of noise levels {σi}. prediction_type (`str`, defaults to `epsilon`, *optional*): Prediction type of the scheduler function; can be `epsilon` (predicts the noise of the diffusion process), `sample` (directly predicts the noisy sample`) or `v_prediction` (see section 2.4 of [Imagen Video](https://imagen.research.google/video/paper.pdf) paper). timestep_spacing (`str`, defaults to `"linspace"`): The way the timesteps should be scaled. Refer to Table 2 of the [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://huggingface.co/papers/2305.08891) for more information. steps_offset (`int`, defaults to 0): An offset added to the inference steps, as required by some model families. """ _compatibles = [e.name for e in KarrasDiffusionSchedulers] order = 1 @register_to_config def __init__( self, num_train_timesteps: int = 1000, beta_start: float = 0.0001, beta_end: float = 0.02, beta_schedule: str = "linear", trained_betas: Optional[Union[np.ndarray, List[float]]] = None, use_karras_sigmas: Optional[bool] = False, prediction_type: str = "epsilon", timestep_spacing: str = "linspace", steps_offset: int = 0, ): if trained_betas is not None: self.betas = torch.tensor(trained_betas, dtype=torch.float32) elif beta_schedule == "linear": self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32) elif beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. self.betas = torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2 elif beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule self.betas = betas_for_alpha_bar(num_train_timesteps) else: raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}") self.alphas = 1.0 - self.betas self.alphas_cumprod = torch.cumprod(self.alphas, dim=0) sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5) sigmas = np.concatenate([sigmas[::-1], [0.0]]).astype(np.float32) self.sigmas = torch.from_numpy(sigmas) # setable values self.num_inference_steps = None self.use_karras_sigmas = use_karras_sigmas self.set_timesteps(num_train_timesteps, None) self.derivatives = [] self.is_scale_input_called = False self._step_index = None self._begin_index = None self.sigmas = self.sigmas.to("cpu") # to avoid too much CPU/GPU communication @property def init_noise_sigma(self): # standard deviation of the initial noise distribution if self.config.timestep_spacing in ["linspace", "trailing"]: return self.sigmas.max() return (self.sigmas.max() ** 2 + 1) ** 0.5 @property def step_index(self): """ The index counter for current timestep. It will increase 1 after each scheduler step. """ return self._step_index @property def begin_index(self): """ The index for the first timestep. It should be set from pipeline with `set_begin_index` method. 
""" return self._begin_index # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler.set_begin_index def set_begin_index(self, begin_index: int = 0): """ Sets the begin index for the scheduler. This function should be run from pipeline before the inference. Args: begin_index (`int`): The begin index for the scheduler. """ self._begin_index = begin_index def scale_model_input(self, sample: torch.Tensor, timestep: Union[float, torch.Tensor]) -> torch.Tensor: """ Ensures interchangeability with schedulers that need to scale the denoising model input depending on the current timestep. Args: sample (`torch.Tensor`): The input sample. timestep (`float` or `torch.Tensor`): The current timestep in the diffusion chain. Returns: `torch.Tensor`: A scaled input sample. """ if self.step_index is None: self._init_step_index(timestep) sigma = self.sigmas[self.step_index] sample = sample / ((sigma**2 + 1) ** 0.5) self.is_scale_input_called = True return sample def get_lms_coefficient(self, order, t, current_order): """ Compute the linear multistep coefficient. Args: order (): t (): current_order (): """ def lms_derivative(tau): prod = 1.0 for k in range(order): if current_order == k: continue prod *= (tau - self.sigmas[t - k]) / (self.sigmas[t - current_order] - self.sigmas[t - k]) return prod integrated_coeff = integrate.quad(lms_derivative, self.sigmas[t], self.sigmas[t + 1], epsrel=1e-4)[0] return integrated_coeff def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None): """ Sets the discrete timesteps used for the diffusion chain (to be run before inference). Args: num_inference_steps (`int`): The number of diffusion steps used when generating samples with a pre-trained model. device (`str` or `torch.device`, *optional*): The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. """ self.num_inference_steps = num_inference_steps # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891 if self.config.timestep_spacing == "linspace": timesteps = np.linspace(0, self.config.num_train_timesteps - 1, num_inference_steps, dtype=np.float32)[ ::-1 ].copy() elif self.config.timestep_spacing == "leading": step_ratio = self.config.num_train_timesteps // self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.float32) timesteps += self.config.steps_offset elif self.config.timestep_spacing == "trailing": step_ratio = self.config.num_train_timesteps / self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 timesteps = (np.arange(self.config.num_train_timesteps, 0, -step_ratio)).round().copy().astype(np.float32) timesteps -= 1 else: raise ValueError( f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'." 
) sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5) log_sigmas = np.log(sigmas) sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas) if self.config.use_karras_sigmas: sigmas = self._convert_to_karras(in_sigmas=sigmas) timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas]) sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32) self.sigmas = torch.from_numpy(sigmas).to(device=device) self.timesteps = torch.from_numpy(timesteps).to(device=device) self._step_index = None self._begin_index = None self.sigmas = self.sigmas.to("cpu") # to avoid too much CPU/GPU communication self.derivatives = [] # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler.index_for_timestep def index_for_timestep(self, timestep, schedule_timesteps=None): if schedule_timesteps is None: schedule_timesteps = self.timesteps indices = (schedule_timesteps == timestep).nonzero() # The sigma index that is taken for the **very** first `step` # is always the second index (or the last index if there is only 1) # This way we can ensure we don't accidentally skip a sigma in # case we start in the middle of the denoising schedule (e.g. for image-to-image) pos = 1 if len(indices) > 1 else 0 return indices[pos].item() # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._init_step_index def _init_step_index(self, timestep): if self.begin_index is None: if isinstance(timestep, torch.Tensor): timestep = timestep.to(self.timesteps.device) self._step_index = self.index_for_timestep(timestep) else: self._step_index = self._begin_index # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._sigma_to_t def _sigma_to_t(self, sigma, log_sigmas): # get log sigma log_sigma = np.log(np.maximum(sigma, 1e-10)) # get distribution dists = log_sigma - log_sigmas[:, np.newaxis] # get sigmas range low_idx = np.cumsum((dists >= 0), axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2) high_idx = low_idx + 1 low = log_sigmas[low_idx] high = log_sigmas[high_idx] # interpolate sigmas w = (low - log_sigma) / (low - high) w = np.clip(w, 0, 1) # transform interpolation to time range t = (1 - w) * low_idx + w * high_idx t = t.reshape(sigma.shape) return t # copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._convert_to_karras def _convert_to_karras(self, in_sigmas: torch.Tensor) -> torch.Tensor: """Constructs the noise schedule of Karras et al. (2022).""" sigma_min: float = in_sigmas[-1].item() sigma_max: float = in_sigmas[0].item() rho = 7.0 # 7.0 is the value used in the paper ramp = np.linspace(0, 1, self.num_inference_steps) min_inv_rho = sigma_min ** (1 / rho) max_inv_rho = sigma_max ** (1 / rho) sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho return sigmas def step( self, model_output: torch.Tensor, timestep: Union[float, torch.Tensor], sample: torch.Tensor, order: int = 4, return_dict: bool = True, ) -> Union[LMSDiscreteSchedulerOutput, Tuple]: """ Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion process from the learned model outputs (most often the predicted noise). Args: model_output (`torch.Tensor`): The direct output from learned diffusion model. timestep (`float` or `torch.Tensor`): The current discrete timestep in the diffusion chain. sample (`torch.Tensor`): A current instance of a sample created by the diffusion process. 
order (`int`, defaults to 4): The order of the linear multistep method. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~schedulers.scheduling_utils.SchedulerOutput`] or tuple. Returns: [`~schedulers.scheduling_utils.SchedulerOutput`] or `tuple`: If return_dict is `True`, [`~schedulers.scheduling_utils.SchedulerOutput`] is returned, otherwise a tuple is returned where the first element is the sample tensor. """ if not self.is_scale_input_called: warnings.warn( "The `scale_model_input` function should be called before `step` to ensure correct denoising. " "See `StableDiffusionPipeline` for a usage example." ) if self.step_index is None: self._init_step_index(timestep) sigma = self.sigmas[self.step_index] # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise if self.config.prediction_type == "epsilon": pred_original_sample = sample - sigma * model_output elif self.config.prediction_type == "v_prediction": # * c_out + input * c_skip pred_original_sample = model_output * (-sigma / (sigma**2 + 1) ** 0.5) + (sample / (sigma**2 + 1)) elif self.config.prediction_type == "sample": pred_original_sample = model_output else: raise ValueError( f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`" ) # 2. Convert to an ODE derivative derivative = (sample - pred_original_sample) / sigma self.derivatives.append(derivative) if len(self.derivatives) > order: self.derivatives.pop(0) # 3. Compute linear multistep coefficients order = min(self.step_index + 1, order) lms_coeffs = [self.get_lms_coefficient(order, self.step_index, curr_order) for curr_order in range(order)] # 4. Compute previous sample based on the derivatives path prev_sample = sample + sum( coeff * derivative for coeff, derivative in zip(lms_coeffs, reversed(self.derivatives)) ) # upon completion increase step index by one self._step_index += 1 if not return_dict: return (prev_sample,) return LMSDiscreteSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample) # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler.add_noise def add_noise( self, original_samples: torch.Tensor, noise: torch.Tensor, timesteps: torch.Tensor, ) -> torch.Tensor: # Make sure sigmas and timesteps have the same device and dtype as original_samples sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype) if original_samples.device.type == "mps" and torch.is_floating_point(timesteps): # mps does not support float64 schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32) timesteps = timesteps.to(original_samples.device, dtype=torch.float32) else: schedule_timesteps = self.timesteps.to(original_samples.device) timesteps = timesteps.to(original_samples.device) # self.begin_index is None when scheduler is used for training, or pipeline does not implement set_begin_index if self.begin_index is None: step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps] elif self.step_index is not None: # add_noise is called after first denoising step (for inpainting) step_indices = [self.step_index] * timesteps.shape[0] else: # add noise is called before first denoising step to create initial latent(img2img) step_indices = [self.begin_index] * timesteps.shape[0] sigma = sigmas[step_indices].flatten() while len(sigma.shape) < len(original_samples.shape): sigma = sigma.unsqueeze(-1) noisy_samples = original_samples + noise * sigma return noisy_samples 
def __len__(self): return self.config.num_train_timesteps
diffusers/src/diffusers/schedulers/scheduling_lms_discrete.py/0
{ "file_path": "diffusers/src/diffusers/schedulers/scheduling_lms_discrete.py", "repo_id": "diffusers", "token_count": 8795 }
152
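As the warning in `step` notes, `scale_model_input` must be called before each `step`. A minimal denoising-loop sketch for this scheduler is shown below; `unet` and `encoder_hidden_states` are hypothetical stand-ins for a trained epsilon-prediction model and its conditioning, while the scheduler API comes directly from the file above.

import torch
from diffusers import LMSDiscreteScheduler

scheduler = LMSDiscreteScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear")
scheduler.set_timesteps(num_inference_steps=25)

# Initial latents are scaled by the standard deviation of the starting noise level.
latents = torch.randn(1, 4, 64, 64) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    latent_model_input = scheduler.scale_model_input(latents, t)  # required before step()
    noise_pred = unet(latent_model_input, t, encoder_hidden_states).sample  # hypothetical model call
    latents = scheduler.step(noise_pred, t, latents, order=4).prev_sample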
# This file is autogenerated by the command `make fix-copies`, do not edit. from ..utils import DummyObject, requires_backends class SpectrogramDiffusionPipeline(metaclass=DummyObject): _backends = ["transformers", "torch", "note_seq"] def __init__(self, *args, **kwargs): requires_backends(self, ["transformers", "torch", "note_seq"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["transformers", "torch", "note_seq"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["transformers", "torch", "note_seq"])
diffusers/src/diffusers/utils/dummy_transformers_and_torch_and_note_seq_objects.py/0
{ "file_path": "diffusers/src/diffusers/utils/dummy_transformers_and_torch_and_note_seq_objects.py", "repo_id": "diffusers", "token_count": 236 }
153
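These autogenerated dummies exist so that importing diffusers never fails when optional backends are absent; the guard only fires when the object is actually used. A small sketch of the same pattern with a hypothetical class (what happens at runtime depends on which backends are installed):

from diffusers.utils import DummyObject, requires_backends

class ExamplePipeline(metaclass=DummyObject):  # hypothetical placeholder class
    _backends = ["transformers", "torch", "note_seq"]

    def __init__(self, *args, **kwargs):
        # Raises an ImportError with pip install instructions if any backend is missing,
        # and is a no-op when all of them are available.
        requires_backends(self, ["transformers", "torch", "note_seq"])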
import tempfile import unittest import numpy as np import torch from diffusers import DiffusionPipeline from diffusers.models.attention_processor import Attention, AttnAddedKVProcessor class AttnAddedKVProcessorTests(unittest.TestCase): def get_constructor_arguments(self, only_cross_attention: bool = False): query_dim = 10 if only_cross_attention: cross_attention_dim = 12 else: # when only cross attention is not set, the cross attention dim must be the same as the query dim cross_attention_dim = query_dim return { "query_dim": query_dim, "cross_attention_dim": cross_attention_dim, "heads": 2, "dim_head": 4, "added_kv_proj_dim": 6, "norm_num_groups": 1, "only_cross_attention": only_cross_attention, "processor": AttnAddedKVProcessor(), } def get_forward_arguments(self, query_dim, added_kv_proj_dim): batch_size = 2 hidden_states = torch.rand(batch_size, query_dim, 3, 2) encoder_hidden_states = torch.rand(batch_size, 4, added_kv_proj_dim) attention_mask = None return { "hidden_states": hidden_states, "encoder_hidden_states": encoder_hidden_states, "attention_mask": attention_mask, } def test_only_cross_attention(self): # self and cross attention torch.manual_seed(0) constructor_args = self.get_constructor_arguments(only_cross_attention=False) attn = Attention(**constructor_args) self.assertTrue(attn.to_k is not None) self.assertTrue(attn.to_v is not None) forward_args = self.get_forward_arguments( query_dim=constructor_args["query_dim"], added_kv_proj_dim=constructor_args["added_kv_proj_dim"] ) self_and_cross_attn_out = attn(**forward_args) # only self attention torch.manual_seed(0) constructor_args = self.get_constructor_arguments(only_cross_attention=True) attn = Attention(**constructor_args) self.assertTrue(attn.to_k is None) self.assertTrue(attn.to_v is None) forward_args = self.get_forward_arguments( query_dim=constructor_args["query_dim"], added_kv_proj_dim=constructor_args["added_kv_proj_dim"] ) only_cross_attn_out = attn(**forward_args) self.assertTrue((only_cross_attn_out != self_and_cross_attn_out).all()) class DeprecatedAttentionBlockTests(unittest.TestCase): def test_conversion_when_using_device_map(self): pipe = DiffusionPipeline.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-torch", safety_checker=None ) pre_conversion = pipe( "foo", num_inference_steps=2, generator=torch.Generator("cpu").manual_seed(0), output_type="np", ).images # the initial conversion succeeds pipe = DiffusionPipeline.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-torch", device_map="balanced", safety_checker=None ) conversion = pipe( "foo", num_inference_steps=2, generator=torch.Generator("cpu").manual_seed(0), output_type="np", ).images with tempfile.TemporaryDirectory() as tmpdir: # save the converted model pipe.save_pretrained(tmpdir) # can also load the converted weights pipe = DiffusionPipeline.from_pretrained(tmpdir, device_map="balanced", safety_checker=None) after_conversion = pipe( "foo", num_inference_steps=2, generator=torch.Generator("cpu").manual_seed(0), output_type="np", ).images self.assertTrue(np.allclose(pre_conversion, conversion, atol=1e-3)) self.assertTrue(np.allclose(conversion, after_conversion, atol=1e-3))
diffusers/tests/models/test_attention_processor.py/0
{ "file_path": "diffusers/tests/models/test_attention_processor.py", "repo_id": "diffusers", "token_count": 1824 }
154
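The test above exercises Attention with AttnAddedKVProcessor and the only_cross_attention switch. A minimal standalone construction, reusing the same tiny dimensions as get_constructor_arguments (the sizes are arbitrary test values, not recommended settings):

import torch

from diffusers.models.attention_processor import Attention, AttnAddedKVProcessor

attn = Attention(
    query_dim=10,
    cross_attention_dim=10,  # matches query_dim, as in the test's constructor arguments
    heads=2,
    dim_head=4,
    added_kv_proj_dim=6,
    norm_num_groups=1,
    only_cross_attention=False,
    processor=AttnAddedKVProcessor(),
)

hidden_states = torch.rand(2, 10, 3, 2)      # (batch, query_dim, height, width)
encoder_hidden_states = torch.rand(2, 4, 6)  # (batch, seq_len, added_kv_proj_dim)
out = attn(hidden_states, encoder_hidden_states=encoder_hidden_states)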
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory

from diffusers.utils.hub_utils import load_or_create_model_card, populate_model_card


class CreateModelCardTest(unittest.TestCase):
    def test_generate_model_card_with_library_name(self):
        with TemporaryDirectory() as tmpdir:
            file_path = Path(tmpdir) / "README.md"
            file_path.write_text("---\nlibrary_name: foo\n---\nContent\n")
            model_card = load_or_create_model_card(file_path)
            populate_model_card(model_card)
            assert model_card.data.library_name == "foo"
diffusers/tests/others/test_hub_utils.py/0
{ "file_path": "diffusers/tests/others/test_hub_utils.py", "repo_id": "diffusers", "token_count": 399 }
155
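The hub-utils test above checks that a library_name already present in the README front matter survives load_or_create_model_card and populate_model_card. The same check, condensed outside unittest:

from pathlib import Path
from tempfile import TemporaryDirectory

from diffusers.utils.hub_utils import load_or_create_model_card, populate_model_card

with TemporaryDirectory() as tmpdir:
    readme = Path(tmpdir) / "README.md"
    readme.write_text("---\nlibrary_name: foo\n---\nContent\n")
    card = load_or_create_model_card(readme)
    populate_model_card(card)
    assert card.data.library_name == "foo"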
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/ import gc import random import tempfile import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, ControlNetModel, DDIMScheduler, StableDiffusionControlNetImg2ImgPipeline, UNet2DConditionModel, ) from diffusers.pipelines.controlnet.pipeline_controlnet import MultiControlNetModel from diffusers.utils import load_image from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, load_numpy, require_torch_gpu, slow, torch_device, ) from diffusers.utils.torch_utils import randn_tensor from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import ( IPAdapterTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, ) enable_full_determinism() class ControlNetImg2ImgPipelineFastTests( IPAdapterTesterMixin, PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase, ): pipeline_class = StableDiffusionControlNetImg2ImgPipeline params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"} batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"}) image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(4, 8), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, norm_num_groups=1, ) torch.manual_seed(0) controlnet = ControlNetModel( block_out_channels=(4, 8), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32), norm_num_groups=1, ) torch.manual_seed(0) scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[4, 8], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, norm_num_groups=2, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") 
components = { "unet": unet, "controlnet": controlnet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, "image_encoder": None, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) controlnet_embedder_scale_factor = 2 control_image = randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device), ) image = floats_tensor(control_image.shape, rng=random.Random(seed)).to(device) image = image.cpu().permute(0, 2, 3, 1)[0] image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64)) inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "np", "image": image, "control_image": control_image, } return inputs def test_attention_slicing_forward_pass(self): return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3) def test_ip_adapter_single(self): expected_pipe_slice = None if torch_device == "cpu": expected_pipe_slice = np.array([0.7096, 0.5149, 0.3571, 0.5897, 0.4715, 0.4052, 0.6098, 0.6886, 0.4213]) return super().test_ip_adapter_single(expected_pipe_slice=expected_pipe_slice) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", ) def test_xformers_attention_forwardGenerator_pass(self): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3) def test_inference_batch_single_identical(self): self._test_inference_batch_single_identical(expected_max_diff=2e-3) class StableDiffusionMultiControlNetPipelineFastTests( IPAdapterTesterMixin, PipelineTesterMixin, PipelineKarrasSchedulerTesterMixin, unittest.TestCase ): pipeline_class = StableDiffusionControlNetImg2ImgPipeline params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"} batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS image_params = frozenset([]) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(4, 8), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, norm_num_groups=1, ) torch.manual_seed(0) def init_weights(m): if isinstance(m, torch.nn.Conv2d): torch.nn.init.normal_(m.weight) m.bias.data.fill_(1.0) controlnet1 = ControlNetModel( block_out_channels=(4, 8), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32), norm_num_groups=1, ) controlnet1.controlnet_down_blocks.apply(init_weights) torch.manual_seed(0) controlnet2 = ControlNetModel( block_out_channels=(4, 8), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32), norm_num_groups=1, ) controlnet2.controlnet_down_blocks.apply(init_weights) torch.manual_seed(0) scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, ) torch.manual_seed(0) vae = 
AutoencoderKL( block_out_channels=[4, 8], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, norm_num_groups=2, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") controlnet = MultiControlNetModel([controlnet1, controlnet2]) components = { "unet": unet, "controlnet": controlnet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, "image_encoder": None, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) controlnet_embedder_scale_factor = 2 control_image = [ randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device), ), randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device), ), ] image = floats_tensor(control_image[0].shape, rng=random.Random(seed)).to(device) image = image.cpu().permute(0, 2, 3, 1)[0] image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64)) inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "np", "image": image, "control_image": control_image, } return inputs def test_control_guidance_switch(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(torch_device) scale = 10.0 steps = 4 inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = steps inputs["controlnet_conditioning_scale"] = scale output_1 = pipe(**inputs)[0] inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = steps inputs["controlnet_conditioning_scale"] = scale output_2 = pipe(**inputs, control_guidance_start=0.1, control_guidance_end=0.2)[0] inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = steps inputs["controlnet_conditioning_scale"] = scale output_3 = pipe(**inputs, control_guidance_start=[0.1, 0.3], control_guidance_end=[0.2, 0.7])[0] inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = steps inputs["controlnet_conditioning_scale"] = scale output_4 = pipe(**inputs, control_guidance_start=0.4, control_guidance_end=[0.5, 0.8])[0] # make sure that all outputs are different assert np.sum(np.abs(output_1 - output_2)) > 1e-3 assert np.sum(np.abs(output_1 - output_3)) > 1e-3 assert np.sum(np.abs(output_1 - output_4)) > 1e-3 def test_attention_slicing_forward_pass(self): return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", ) def test_xformers_attention_forwardGenerator_pass(self): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3) def test_inference_batch_single_identical(self): 
self._test_inference_batch_single_identical(expected_max_diff=2e-3) def test_ip_adapter_single(self): expected_pipe_slice = None if torch_device == "cpu": expected_pipe_slice = np.array([0.5293, 0.7339, 0.6642, 0.3950, 0.5212, 0.5175, 0.7002, 0.5907, 0.5182]) return super().test_ip_adapter_single(expected_pipe_slice=expected_pipe_slice) def test_save_pretrained_raise_not_implemented_exception(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) with tempfile.TemporaryDirectory() as tmpdir: try: # save_pretrained is not implemented for Multi-ControlNet pipe.save_pretrained(tmpdir) except NotImplementedError: pass @slow @require_torch_gpu class ControlNetImg2ImgPipelineSlowTests(unittest.TestCase): def setUp(self): super().setUp() gc.collect() torch.cuda.empty_cache() def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def test_canny(self): controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny") pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=None) generator = torch.Generator(device="cpu").manual_seed(0) prompt = "evil space-punk bird" control_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" ).resize((512, 512)) image = load_image( "https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png" ).resize((512, 512)) output = pipe( prompt, image, control_image=control_image, generator=generator, output_type="np", num_inference_steps=50, strength=0.6, ) image = output.images[0] assert image.shape == (512, 512, 3) expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy" ) assert np.abs(expected_image - image).max() < 9e-2
diffusers/tests/pipelines/controlnet/test_controlnet_img2img.py/0
{ "file_path": "diffusers/tests/pipelines/controlnet/test_controlnet_img2img.py", "repo_id": "diffusers", "token_count": 7575 }
156
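The slow test_canny case above doubles as a usage recipe for StableDiffusionControlNetImg2ImgPipeline. Condensed into a standalone sketch; the checkpoints and image URLs are the ones the test itself uses, and any compatible ControlNet/base-model pair should work the same way.

import torch

from diffusers import ControlNetModel, StableDiffusionControlNetImg2ImgPipeline
from diffusers.utils import load_image

controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", controlnet=controlnet, safety_checker=None
)
pipe.enable_model_cpu_offload()

control_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
).resize((512, 512))
init_image = load_image(
    "https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png"
).resize((512, 512))

image = pipe(
    "evil space-punk bird",
    image=init_image,
    control_image=control_image,
    num_inference_steps=50,
    strength=0.6,
    generator=torch.Generator("cpu").manual_seed(0),
).images[0]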
import gc import unittest import numpy as np import torch from transformers import AutoTokenizer, CLIPTextConfig, CLIPTextModel, CLIPTokenizer, T5EncoderModel from diffusers import AutoencoderKL, FlowMatchEulerDiscreteScheduler, FluxPipeline, FluxTransformer2DModel from diffusers.utils.testing_utils import ( numpy_cosine_similarity_distance, require_torch_gpu, slow, torch_device, ) from ..test_pipelines_common import ( PipelineTesterMixin, check_qkv_fusion_matches_attn_procs_length, check_qkv_fusion_processors_exist, ) class FluxPipelineFastTests(unittest.TestCase, PipelineTesterMixin): pipeline_class = FluxPipeline params = frozenset(["prompt", "height", "width", "guidance_scale", "prompt_embeds", "pooled_prompt_embeds"]) batch_params = frozenset(["prompt"]) def get_dummy_components(self): torch.manual_seed(0) transformer = FluxTransformer2DModel( patch_size=1, in_channels=4, num_layers=1, num_single_layers=1, attention_head_dim=16, num_attention_heads=2, joint_attention_dim=32, pooled_projection_dim=32, axes_dims_rope=[4, 4, 8], ) clip_text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=32, ) torch.manual_seed(0) text_encoder = CLIPTextModel(clip_text_encoder_config) torch.manual_seed(0) text_encoder_2 = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5") tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") tokenizer_2 = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5") torch.manual_seed(0) vae = AutoencoderKL( sample_size=32, in_channels=3, out_channels=3, block_out_channels=(4,), layers_per_block=1, latent_channels=1, norm_num_groups=1, use_quant_conv=False, use_post_quant_conv=False, shift_factor=0.0609, scaling_factor=1.5035, ) scheduler = FlowMatchEulerDiscreteScheduler() return { "scheduler": scheduler, "text_encoder": text_encoder, "text_encoder_2": text_encoder_2, "tokenizer": tokenizer, "tokenizer_2": tokenizer_2, "transformer": transformer, "vae": vae, } def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device="cpu").manual_seed(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 5.0, "height": 8, "width": 8, "max_sequence_length": 48, "output_type": "np", } return inputs def test_flux_different_prompts(self): pipe = self.pipeline_class(**self.get_dummy_components()).to(torch_device) inputs = self.get_dummy_inputs(torch_device) output_same_prompt = pipe(**inputs).images[0] inputs = self.get_dummy_inputs(torch_device) inputs["prompt_2"] = "a different prompt" output_different_prompts = pipe(**inputs).images[0] max_diff = np.abs(output_same_prompt - output_different_prompts).max() # Outputs should be different here # For some reasons, they don't show large differences assert max_diff > 1e-6 def test_flux_prompt_embeds(self): pipe = self.pipeline_class(**self.get_dummy_components()).to(torch_device) inputs = self.get_dummy_inputs(torch_device) output_with_prompt = pipe(**inputs).images[0] inputs = self.get_dummy_inputs(torch_device) prompt = inputs.pop("prompt") (prompt_embeds, pooled_prompt_embeds, text_ids) = pipe.encode_prompt( prompt, prompt_2=None, device=torch_device, max_sequence_length=inputs["max_sequence_length"], ) output_with_embeds = 
pipe( prompt_embeds=prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, **inputs, ).images[0] max_diff = np.abs(output_with_prompt - output_with_embeds).max() assert max_diff < 1e-4 def test_fused_qkv_projections(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = pipe(**inputs).images original_image_slice = image[0, -3:, -3:, -1] # TODO (sayakpaul): will refactor this once `fuse_qkv_projections()` has been added # to the pipeline level. pipe.transformer.fuse_qkv_projections() assert check_qkv_fusion_processors_exist( pipe.transformer ), "Something wrong with the fused attention processors. Expected all the attention processors to be fused." assert check_qkv_fusion_matches_attn_procs_length( pipe.transformer, pipe.transformer.original_attn_processors ), "Something wrong with the attention processors concerning the fused QKV projections." inputs = self.get_dummy_inputs(device) image = pipe(**inputs).images image_slice_fused = image[0, -3:, -3:, -1] pipe.transformer.unfuse_qkv_projections() inputs = self.get_dummy_inputs(device) image = pipe(**inputs).images image_slice_disabled = image[0, -3:, -3:, -1] assert np.allclose( original_image_slice, image_slice_fused, atol=1e-3, rtol=1e-3 ), "Fusion of QKV projections shouldn't affect the outputs." assert np.allclose( image_slice_fused, image_slice_disabled, atol=1e-3, rtol=1e-3 ), "Outputs, with QKV projection fusion enabled, shouldn't change when fused QKV projections are disabled." assert np.allclose( original_image_slice, image_slice_disabled, atol=1e-2, rtol=1e-2 ), "Original outputs should match when fused QKV projections are disabled." @slow @require_torch_gpu class FluxPipelineSlowTests(unittest.TestCase): pipeline_class = FluxPipeline repo_id = "black-forest-labs/FLUX.1-schnell" def setUp(self): super().setUp() gc.collect() torch.cuda.empty_cache() def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def get_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device="cpu").manual_seed(seed) return { "prompt": "A photo of a cat", "num_inference_steps": 2, "guidance_scale": 5.0, "output_type": "np", "generator": generator, } # TODO: Dhruv. Move large model tests to a dedicated runner) @unittest.skip("We cannot run inference on this model with the current CI hardware") def test_flux_inference(self): pipe = self.pipeline_class.from_pretrained(self.repo_id, torch_dtype=torch.bfloat16) pipe.enable_model_cpu_offload() inputs = self.get_inputs(torch_device) image = pipe(**inputs).images[0] image_slice = image[0, :10, :10] expected_slice = np.array( [ [0.36132812, 0.30004883, 0.25830078], [0.36669922, 0.31103516, 0.23754883], [0.34814453, 0.29248047, 0.23583984], [0.35791016, 0.30981445, 0.23999023], [0.36328125, 0.31274414, 0.2607422], [0.37304688, 0.32177734, 0.26171875], [0.3671875, 0.31933594, 0.25756836], [0.36035156, 0.31103516, 0.2578125], [0.3857422, 0.33789062, 0.27563477], [0.3701172, 0.31982422, 0.265625], ], dtype=np.float32, ) max_diff = numpy_cosine_similarity_distance(expected_slice.flatten(), image_slice.flatten()) assert max_diff < 1e-4
diffusers/tests/pipelines/flux/test_pipeline_flux.py/0
{ "file_path": "diffusers/tests/pipelines/flux/test_pipeline_flux.py", "repo_id": "diffusers", "token_count": 4294 }
157
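FluxPipelineSlowTests above is skipped on CI hardware, but it documents the intended invocation. A condensed sketch of the same call, using bfloat16 weights plus model CPU offload as the test does (FLUX.1-schnell is a few-step distilled model, so a very low step count is expected):

import torch

from diffusers import FluxPipeline

pipe = FluxPipeline.from_pretrained("black-forest-labs/FLUX.1-schnell", torch_dtype=torch.bfloat16)
pipe.enable_model_cpu_offload()

image = pipe(
    "A photo of a cat",
    num_inference_steps=2,
    guidance_scale=5.0,
    generator=torch.Generator("cpu").manual_seed(0),
).images[0]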
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import random import unittest import numpy as np import torch from diffusers import DDIMScheduler, LDMSuperResolutionPipeline, UNet2DModel, VQModel from diffusers.utils import PIL_INTERPOLATION from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, load_image, nightly, require_torch, torch_device, ) enable_full_determinism() class LDMSuperResolutionPipelineFastTests(unittest.TestCase): @property def dummy_image(self): batch_size = 1 num_channels = 3 sizes = (32, 32) image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device) return image @property def dummy_uncond_unet(self): torch.manual_seed(0) model = UNet2DModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=6, out_channels=3, down_block_types=("DownBlock2D", "AttnDownBlock2D"), up_block_types=("AttnUpBlock2D", "UpBlock2D"), ) return model @property def dummy_vq_model(self): torch.manual_seed(0) model = VQModel( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=3, ) return model def test_inference_superresolution(self): device = "cpu" unet = self.dummy_uncond_unet scheduler = DDIMScheduler() vqvae = self.dummy_vq_model ldm = LDMSuperResolutionPipeline(unet=unet, vqvae=vqvae, scheduler=scheduler) ldm.to(device) ldm.set_progress_bar_config(disable=None) init_image = self.dummy_image.to(device) generator = torch.Generator(device=device).manual_seed(0) image = ldm(image=init_image, generator=generator, num_inference_steps=2, output_type="np").images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.8678, 0.8245, 0.6381, 0.6830, 0.4385, 0.5599, 0.4641, 0.6201, 0.5150]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 @unittest.skipIf(torch_device != "cuda", "This test requires a GPU") def test_inference_superresolution_fp16(self): unet = self.dummy_uncond_unet scheduler = DDIMScheduler() vqvae = self.dummy_vq_model # put models in fp16 unet = unet.half() vqvae = vqvae.half() ldm = LDMSuperResolutionPipeline(unet=unet, vqvae=vqvae, scheduler=scheduler) ldm.to(torch_device) ldm.set_progress_bar_config(disable=None) init_image = self.dummy_image.to(torch_device) image = ldm(init_image, num_inference_steps=2, output_type="np").images assert image.shape == (1, 64, 64, 3) @nightly @require_torch class LDMSuperResolutionPipelineIntegrationTests(unittest.TestCase): def test_inference_superresolution(self): init_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/vq_diffusion/teddy_bear_pool.png" ) init_image = init_image.resize((64, 64), resample=PIL_INTERPOLATION["lanczos"]) ldm = LDMSuperResolutionPipeline.from_pretrained("duongna/ldm-super-resolution") ldm.set_progress_bar_config(disable=None) generator = 
torch.manual_seed(0) image = ldm(image=init_image, generator=generator, num_inference_steps=20, output_type="np").images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 256, 256, 3) expected_slice = np.array([0.7644, 0.7679, 0.7642, 0.7633, 0.7666, 0.7560, 0.7425, 0.7257, 0.6907]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
diffusers/tests/pipelines/latent_diffusion/test_latent_diffusion_superresolution.py/0
{ "file_path": "diffusers/tests/pipelines/latent_diffusion/test_latent_diffusion_superresolution.py", "repo_id": "diffusers", "token_count": 2036 }
158
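The integration test above runs LDMSuperResolutionPipeline end to end: it downscales a sample image to 64x64 and upscales it back to 256x256. Condensed below; the plain PIL resize stands in for the test's lanczos resampling.

import torch

from diffusers import LDMSuperResolutionPipeline
from diffusers.utils import load_image

pipe = LDMSuperResolutionPipeline.from_pretrained("duongna/ldm-super-resolution")

low_res = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/vq_diffusion/teddy_bear_pool.png"
).resize((64, 64))

upscaled = pipe(image=low_res, generator=torch.manual_seed(0), num_inference_steps=20).images[0]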
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import tempfile import unittest import numpy as np import torch from transformers import AutoTokenizer, T5EncoderModel from diffusers import ( AutoencoderKL, DDIMScheduler, PixArtAlphaPipeline, PixArtTransformer2DModel, ) from diffusers.utils.testing_utils import ( enable_full_determinism, numpy_cosine_similarity_distance, require_torch_gpu, slow, torch_device, ) from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin, to_np enable_full_determinism() class PixArtAlphaPipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = PixArtAlphaPipeline params = TEXT_TO_IMAGE_PARAMS - {"cross_attention_kwargs"} batch_params = TEXT_TO_IMAGE_BATCH_PARAMS image_params = TEXT_TO_IMAGE_IMAGE_PARAMS image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS required_optional_params = PipelineTesterMixin.required_optional_params def get_dummy_components(self): torch.manual_seed(0) transformer = PixArtTransformer2DModel( sample_size=8, num_layers=2, patch_size=2, attention_head_dim=8, num_attention_heads=3, caption_channels=32, in_channels=4, cross_attention_dim=24, out_channels=8, attention_bias=True, activation_fn="gelu-approximate", num_embeds_ada_norm=1000, norm_type="ada_norm_single", norm_elementwise_affine=False, norm_eps=1e-6, ) torch.manual_seed(0) vae = AutoencoderKL() scheduler = DDIMScheduler() text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5") tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5") components = { "transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler, "text_encoder": text_encoder, "tokenizer": tokenizer, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 5.0, "use_resolution_binning": False, "output_type": "np", } return inputs def test_sequential_cpu_offload_forward_pass(self): # TODO(PVP, Sayak) need to fix later return def test_save_load_optional_components(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) prompt = inputs["prompt"] generator = inputs["generator"] num_inference_steps = inputs["num_inference_steps"] output_type = inputs["output_type"] ( prompt_embeds, prompt_attention_mask, negative_prompt_embeds, negative_prompt_attention_mask, ) = pipe.encode_prompt(prompt) # inputs with prompt converted to embeddings inputs = { "prompt_embeds": prompt_embeds, "prompt_attention_mask": prompt_attention_mask, "negative_prompt": None, "negative_prompt_embeds": 
negative_prompt_embeds, "negative_prompt_attention_mask": negative_prompt_attention_mask, "generator": generator, "num_inference_steps": num_inference_steps, "output_type": output_type, "use_resolution_binning": False, } # set all optional components to None for optional_component in pipe._optional_components: setattr(pipe, optional_component, None) output = pipe(**inputs)[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(tmpdir) pipe_loaded = self.pipeline_class.from_pretrained(tmpdir) pipe_loaded.to(torch_device) pipe_loaded.set_progress_bar_config(disable=None) for optional_component in pipe._optional_components: self.assertTrue( getattr(pipe_loaded, optional_component) is None, f"`{optional_component}` did not stay set to None after loading.", ) inputs = self.get_dummy_inputs(torch_device) generator = inputs["generator"] num_inference_steps = inputs["num_inference_steps"] output_type = inputs["output_type"] # inputs with prompt converted to embeddings inputs = { "prompt_embeds": prompt_embeds, "prompt_attention_mask": prompt_attention_mask, "negative_prompt": None, "negative_prompt_embeds": negative_prompt_embeds, "negative_prompt_attention_mask": negative_prompt_attention_mask, "generator": generator, "num_inference_steps": num_inference_steps, "output_type": output_type, "use_resolution_binning": False, } output_loaded = pipe_loaded(**inputs)[0] max_diff = np.abs(to_np(output) - to_np(output_loaded)).max() self.assertLess(max_diff, 1e-4) def test_inference(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] self.assertEqual(image.shape, (1, 8, 8, 3)) expected_slice = np.array([0.6319, 0.3526, 0.3806, 0.6327, 0.4639, 0.483, 0.2583, 0.5331, 0.4852]) max_diff = np.abs(image_slice.flatten() - expected_slice).max() self.assertLessEqual(max_diff, 1e-3) def test_inference_non_square_images(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = pipe(**inputs, height=32, width=48).images image_slice = image[0, -3:, -3:, -1] self.assertEqual(image.shape, (1, 32, 48, 3)) expected_slice = np.array([0.6493, 0.537, 0.4081, 0.4762, 0.3695, 0.4711, 0.3026, 0.5218, 0.5263]) max_diff = np.abs(image_slice.flatten() - expected_slice).max() self.assertLessEqual(max_diff, 1e-3) def test_inference_with_embeddings_and_multiple_images(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) prompt = inputs["prompt"] generator = inputs["generator"] num_inference_steps = inputs["num_inference_steps"] output_type = inputs["output_type"] prompt_embeds, prompt_attn_mask, negative_prompt_embeds, neg_prompt_attn_mask = pipe.encode_prompt(prompt) # inputs with prompt converted to embeddings inputs = { "prompt_embeds": prompt_embeds, "prompt_attention_mask": prompt_attn_mask, "negative_prompt": None, "negative_prompt_embeds": negative_prompt_embeds, "negative_prompt_attention_mask": neg_prompt_attn_mask, "generator": generator, "num_inference_steps": num_inference_steps, "output_type": output_type, "num_images_per_prompt": 2, "use_resolution_binning": False, } # set all optional components to 
None for optional_component in pipe._optional_components: setattr(pipe, optional_component, None) output = pipe(**inputs)[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(tmpdir) pipe_loaded = self.pipeline_class.from_pretrained(tmpdir) pipe_loaded.to(torch_device) pipe_loaded.set_progress_bar_config(disable=None) for optional_component in pipe._optional_components: self.assertTrue( getattr(pipe_loaded, optional_component) is None, f"`{optional_component}` did not stay set to None after loading.", ) inputs = self.get_dummy_inputs(torch_device) generator = inputs["generator"] num_inference_steps = inputs["num_inference_steps"] output_type = inputs["output_type"] # inputs with prompt converted to embeddings inputs = { "prompt_embeds": prompt_embeds, "prompt_attention_mask": prompt_attn_mask, "negative_prompt": None, "negative_prompt_embeds": negative_prompt_embeds, "negative_prompt_attention_mask": neg_prompt_attn_mask, "generator": generator, "num_inference_steps": num_inference_steps, "output_type": output_type, "num_images_per_prompt": 2, "use_resolution_binning": False, } output_loaded = pipe_loaded(**inputs)[0] max_diff = np.abs(to_np(output) - to_np(output_loaded)).max() self.assertLess(max_diff, 1e-4) def test_inference_with_multiple_images_per_prompt(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) inputs["num_images_per_prompt"] = 2 image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] self.assertEqual(image.shape, (2, 8, 8, 3)) expected_slice = np.array([0.6319, 0.3526, 0.3806, 0.6327, 0.4639, 0.483, 0.2583, 0.5331, 0.4852]) max_diff = np.abs(image_slice.flatten() - expected_slice).max() self.assertLessEqual(max_diff, 1e-3) def test_raises_warning_for_mask_feature(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) inputs.update({"mask_feature": True}) with self.assertWarns(FutureWarning) as warning_ctx: _ = pipe(**inputs).images assert "mask_feature" in str(warning_ctx.warning) def test_inference_batch_single_identical(self): self._test_inference_batch_single_identical(expected_max_diff=1e-3) @slow @require_torch_gpu class PixArtAlphaPipelineIntegrationTests(unittest.TestCase): ckpt_id_1024 = "PixArt-alpha/PixArt-XL-2-1024-MS" ckpt_id_512 = "PixArt-alpha/PixArt-XL-2-512x512" prompt = "A small cactus with a happy face in the Sahara desert." 
def setUp(self): super().setUp() gc.collect() torch.cuda.empty_cache() def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def test_pixart_1024(self): generator = torch.Generator("cpu").manual_seed(0) pipe = PixArtAlphaPipeline.from_pretrained(self.ckpt_id_1024, torch_dtype=torch.float16) pipe.enable_model_cpu_offload() prompt = self.prompt image = pipe(prompt, generator=generator, num_inference_steps=2, output_type="np").images image_slice = image[0, -3:, -3:, -1] expected_slice = np.array([0.0742, 0.0835, 0.2114, 0.0295, 0.0784, 0.2361, 0.1738, 0.2251, 0.3589]) max_diff = numpy_cosine_similarity_distance(image_slice.flatten(), expected_slice) self.assertLessEqual(max_diff, 1e-4) def test_pixart_512(self): generator = torch.Generator("cpu").manual_seed(0) pipe = PixArtAlphaPipeline.from_pretrained(self.ckpt_id_512, torch_dtype=torch.float16) pipe.enable_model_cpu_offload() prompt = self.prompt image = pipe(prompt, generator=generator, num_inference_steps=2, output_type="np").images image_slice = image[0, -3:, -3:, -1] expected_slice = np.array([0.3477, 0.3882, 0.4541, 0.3413, 0.3821, 0.4463, 0.4001, 0.4409, 0.4958]) max_diff = numpy_cosine_similarity_distance(image_slice.flatten(), expected_slice) self.assertLessEqual(max_diff, 1e-4) def test_pixart_1024_without_resolution_binning(self): generator = torch.manual_seed(0) pipe = PixArtAlphaPipeline.from_pretrained(self.ckpt_id_1024, torch_dtype=torch.float16) pipe.enable_model_cpu_offload() prompt = self.prompt height, width = 1024, 768 num_inference_steps = 2 image = pipe( prompt, height=height, width=width, generator=generator, num_inference_steps=num_inference_steps, output_type="np", ).images image_slice = image[0, -3:, -3:, -1] generator = torch.manual_seed(0) no_res_bin_image = pipe( prompt, height=height, width=width, generator=generator, num_inference_steps=num_inference_steps, output_type="np", use_resolution_binning=False, ).images no_res_bin_image_slice = no_res_bin_image[0, -3:, -3:, -1] assert not np.allclose(image_slice, no_res_bin_image_slice, atol=1e-4, rtol=1e-4) def test_pixart_512_without_resolution_binning(self): generator = torch.manual_seed(0) pipe = PixArtAlphaPipeline.from_pretrained(self.ckpt_id_512, torch_dtype=torch.float16) pipe.enable_model_cpu_offload() prompt = self.prompt height, width = 512, 768 num_inference_steps = 2 image = pipe( prompt, height=height, width=width, generator=generator, num_inference_steps=num_inference_steps, output_type="np", ).images image_slice = image[0, -3:, -3:, -1] generator = torch.manual_seed(0) no_res_bin_image = pipe( prompt, height=height, width=width, generator=generator, num_inference_steps=num_inference_steps, output_type="np", use_resolution_binning=False, ).images no_res_bin_image_slice = no_res_bin_image[0, -3:, -3:, -1] assert not np.allclose(image_slice, no_res_bin_image_slice, atol=1e-4, rtol=1e-4)
diffusers/tests/pipelines/pixart_alpha/test_pixart.py/0
{ "file_path": "diffusers/tests/pipelines/pixart_alpha/test_pixart.py", "repo_id": "diffusers", "token_count": 7234 }
159
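The PixArt-alpha integration tests above also serve as a minimal recipe for the released checkpoints. Condensed for the 1024px model; the test passes num_inference_steps=2 purely for speed, so it is left at the pipeline default here.

import torch

from diffusers import PixArtAlphaPipeline

pipe = PixArtAlphaPipeline.from_pretrained("PixArt-alpha/PixArt-XL-2-1024-MS", torch_dtype=torch.float16)
pipe.enable_model_cpu_offload()

image = pipe(
    "A small cactus with a happy face in the Sahara desert.",
    generator=torch.Generator("cpu").manual_seed(0),
).images[0]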
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, load_image, load_numpy, require_torch_gpu, slow, torch_device, ) from ..pipeline_params import ( TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS, ) from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class StableDiffusion2InpaintPipelineFastTests( PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase ): pipeline_class = StableDiffusionInpaintPipeline params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS image_params = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess image_latents_params = frozenset([]) callback_cfg_params = TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS.union({"mask", "masked_image_latents"}) def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=9, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, # SD2-specific config below attention_head_dim=(2, 4), use_linear_projection=True, ) scheduler = PNDMScheduler(skip_prk_steps=True) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, # SD2-specific config below hidden_act="gelu", projection_dim=512, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, "image_encoder": None, } return components def get_dummy_inputs(self, device, seed=0): # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) image = image.cpu().permute(0, 2, 3, 1)[0] init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64)) 
mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((64, 64)) if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "image": init_image, "mask_image": mask_image, "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "np", } return inputs def test_stable_diffusion_inpaint(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionInpaintPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(expected_max_diff=3e-3) @slow @require_torch_gpu class StableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase): def setUp(self): # clean up the VRAM before each test super().setUp() gc.collect() torch.cuda.empty_cache() def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def test_stable_diffusion_inpaint_pipeline(self): init_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-inpaint/init_image.png" ) mask_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" ) expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint" "/yellow_cat_sitting_on_a_park_bench.npy" ) model_id = "stabilityai/stable-diffusion-2-inpainting" pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() prompt = "Face of a yellow cat, high resolution, sitting on a park bench" generator = torch.manual_seed(0) output = pipe( prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, output_type="np", ) image = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image).max() < 9e-3 def test_stable_diffusion_inpaint_pipeline_fp16(self): init_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-inpaint/init_image.png" ) mask_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" ) expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint" "/yellow_cat_sitting_on_a_park_bench_fp16.npy" ) model_id = "stabilityai/stable-diffusion-2-inpainting" pipe = StableDiffusionInpaintPipeline.from_pretrained( model_id, torch_dtype=torch.float16, safety_checker=None, ) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() prompt = "Face of a yellow cat, high resolution, sitting on a park bench" generator = torch.manual_seed(0) output = pipe( prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, output_type="np", ) image = output.images[0] 
assert image.shape == (512, 512, 3) assert np.abs(expected_image - image).max() < 5e-1 def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() init_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-inpaint/init_image.png" ) mask_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" ) model_id = "stabilityai/stable-diffusion-2-inpainting" pndm = PNDMScheduler.from_pretrained(model_id, subfolder="scheduler") pipe = StableDiffusionInpaintPipeline.from_pretrained( model_id, safety_checker=None, scheduler=pndm, torch_dtype=torch.float16, ) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing(1) pipe.enable_sequential_cpu_offload() prompt = "Face of a yellow cat, high resolution, sitting on a park bench" generator = torch.manual_seed(0) _ = pipe( prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, num_inference_steps=2, output_type="np", ) mem_bytes = torch.cuda.max_memory_allocated() # make sure that less than 2.65 GB is allocated assert mem_bytes < 2.65 * 10**9
diffusers/tests/pipelines/stable_diffusion_2/test_stable_diffusion_inpaint.py/0
{ "file_path": "diffusers/tests/pipelines/stable_diffusion_2/test_stable_diffusion_inpaint.py", "repo_id": "diffusers", "token_count": 4841 }
160
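The fp16 integration test in the entry above condenses to the following standalone usage; the checkpoint, prompt, and image URLs are taken from the test itself.

import torch

from diffusers import StableDiffusionInpaintPipeline
from diffusers.utils import load_image

pipe = StableDiffusionInpaintPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-inpainting", torch_dtype=torch.float16, safety_checker=None
)
pipe.to("cuda")

init_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/init_image.png"
)
mask_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
)

image = pipe(
    prompt="Face of a yellow cat, high resolution, sitting on a park bench",
    image=init_image,
    mask_image=mask_image,
    generator=torch.manual_seed(0),
).images[0]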