Added .ipynb_checkpoints and __pycache__ to .gitignore
- .ipynb_checkpoints/README-checkpoint.md +0 -116
- .ipynb_checkpoints/Untitled-checkpoint.ipynb +0 -99
- .ipynb_checkpoints/model_index-checkpoint.json +0 -25
- .ipynb_checkpoints/pipeline_waifu-checkpoint.py +0 -641
- .ipynb_checkpoints/test-checkpoint.ipynb +0 -0
- .ipynb_checkpoints/waifu-checkpoint.png +0 -0
- __pycache__/pipeline_waifu.cpython-310.pyc +0 -0
- __pycache__/pipeline_waifu.cpython-311.pyc +0 -0
.ipynb_checkpoints/README-checkpoint.md
DELETED
@@ -1,116 +0,0 @@
---
license: apache-2.0
pipeline_tag: text-to-image
---
# Work / training in progress!



⚡️Waifu: efficient high-resolution waifu synthesis

waifu is a free text-to-image model that can efficiently generate images in 80 languages. Our goal is to create a small model without compromising on quality.

## Core designs include:

(1) [**AuraDiffusion/16ch-vae**](https://huggingface.co/AuraDiffusion/16ch-vae): a fully open-source 16-channel VAE, natively trained in fp16. \
(2) [**Linear DiT**](https://github.com/NVlabs/Sana): we use a 1.6B DiT transformer with linear attention. \
(3) [**MEXMA-SigLIP**](https://huggingface.co/visheratin/mexma-siglip): MEXMA-SigLIP combines the [MEXMA](https://huggingface.co/facebook/MEXMA) multilingual text encoder with an image encoder from the [SigLIP](https://huggingface.co/timm/ViT-SO400M-14-SigLIP-384) model. This gives us a high-performance CLIP model covering 80 languages. \
(4) Other: we use the Flow-Euler sampler, the Adafactor-Fused optimizer, and bf16 precision for training, and combine efficient caption labeling (MoonDream, CogVLM, human-written, GPT-generated) with danbooru tags to accelerate convergence.
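Because the text encoder is multilingual, prompts with the same meaning should land near each other in embedding space regardless of language. As a quick check, here is a minimal sketch; it reuses the tokenizer and text encoder exactly as they are loaded in the example below, and the mean pooling is just for illustration, not how the pipeline consumes the embeddings:

```py
import torch
from transformers import XLMRobertaTokenizerFast, XLMRobertaModel

pipe_id = "AiArtLab/waifu-2b"
tokenizer = XLMRobertaTokenizerFast.from_pretrained(pipe_id, subfolder="tokenizer")
encoder = XLMRobertaModel.from_pretrained(
    pipe_id, variant="fp16", subfolder="text_encoder", add_pooling_layer=False
).to("cuda")

@torch.no_grad()
def embed(text: str) -> torch.Tensor:
    inputs = tokenizer(text, return_tensors="pt").to("cuda")
    hidden = encoder(**inputs).last_hidden_state   # (1, seq_len, hidden_dim)
    return hidden.float().mean(dim=1).squeeze(0)   # mean-pool into a single vector

en = embed("anime girl in front of the Eiffel Tower")
ru = embed("аниме девушка на фоне Эйфелевой башни")
print(torch.nn.functional.cosine_similarity(en, ru, dim=0).item())  # should be high
```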
## Example

```py
import torch
from diffusers import DiffusionPipeline

from transformers import XLMRobertaTokenizerFast, XLMRobertaModel
from diffusers import FlowMatchEulerDiscreteScheduler
from diffusers.models import AutoencoderKL
from diffusers import SanaTransformer2DModel

pipe_id = "AiArtLab/waifu-2b"
variant = "fp16"

# tokenizer
tokenizer = XLMRobertaTokenizerFast.from_pretrained(
    pipe_id,
    subfolder="tokenizer"
)

# text encoder
text_encoder = XLMRobertaModel.from_pretrained(
    pipe_id,
    variant=variant,
    subfolder="text_encoder",
    add_pooling_layer=False
).to("cuda")

# scheduler
scheduler = FlowMatchEulerDiscreteScheduler(shift=1.0)

# VAE
vae = AutoencoderKL.from_pretrained(
    pipe_id,
    variant=variant,
    subfolder="vae"
).to("cuda")

# transformer
transformer = SanaTransformer2DModel.from_pretrained(
    pipe_id,
    variant=variant,
    subfolder="transformer"
).to("cuda")

# pipeline
pipeline = DiffusionPipeline.from_pretrained(
    pipe_id,
    tokenizer=tokenizer,
    text_encoder=text_encoder,
    vae=vae,
    transformer=transformer,
    scheduler=scheduler,
    trust_remote_code=True,
).to("cuda")
print(pipeline)

prompt = 'аниме девушка, waifu, يبتسم جنسيا , sur le fond de la tour Eiffel'
generator = torch.Generator(device="cuda").manual_seed(42)

images = pipeline(
    prompt=prompt,
    negative_prompt="",
    generator=generator,
)[0]

for img in images:
    img.show()
    img.save('waifu.png')
```


## Donations

We are a small, GPU-poor group of enthusiasts (current training budget: ~$2k).

Please contact us if you can provide GPUs for training.

DOGE: DEw2DR8C7BnF8GgcrfTzUjSnGkuMeJhg83


A fluffy domestic cat with piercing green eyes sits attentively in a sunlit room filled with natural light streaming through large windows, its soft fur reflecting warm hues of orange from the golden glow casting across its sleek body and delicate features

## Contacts

[recoilme](https://t.me/recoilme)

## How to cite

```bibtex
@misc{Waifu,
  url = {[https://huggingface.co/AiArtLab/waifu-2b](https://huggingface.co/AiArtLab/waifu-2b)},
  title = {waifu-2b},
  author = {recoilme, muinez, femboysLover}
}
```
.ipynb_checkpoints/Untitled-checkpoint.ipynb
DELETED
@@ -1,99 +0,0 @@

The notebook held one executed code cell plus a second, unexecuted cell containing the expected output.

Cell [13] source:

```py
import torch
from diffusers import DiffusionPipeline

pipe_id = "/home/recoilme/models/waifu-2b"
variant = "fp16"
pipe = DiffusionPipeline.from_pretrained(
    pipe_id, 
    variant=variant,
    trust_remote_code=True
)
print(pipe)
#pipe_sd.to("cuda")
```

Cell [13] error output:

```
---------------------------------------------------------------------------
AttributeError                            Traceback (most recent call last)
Cell In[13], line 6
File ~/.local/lib/python3.11/site-packages/huggingface_hub/utils/_validators.py:114, in validate_hf_hub_args.<locals>._inner_fn(*args, **kwargs)
File ~/.local/lib/python3.11/site-packages/diffusers/pipelines/pipeline_utils.py:785, in DiffusionPipeline.from_pretrained(cls, pretrained_model_name_or_path, **kwargs)
File ~/.local/lib/python3.11/site-packages/diffusers/pipelines/pipeline_loading_utils.py:370, in _get_pipeline_class(class_obj, config, load_connected_pipeline, custom_pipeline, repo_id, hub_revision, class_name, cache_dir, revision)
File ~/.local/lib/python3.11/site-packages/diffusers/pipelines/pipeline_loading_utils.py:349, in _get_custom_pipeline_class(custom_pipeline, repo_id, hub_revision, class_name, cache_dir, revision)
File ~/.local/lib/python3.11/site-packages/huggingface_hub/utils/_validators.py:114, in validate_hf_hub_args.<locals>._inner_fn(*args, **kwargs)
File ~/.local/lib/python3.11/site-packages/diffusers/utils/dynamic_modules_utils.py:457, in get_class_from_dynamic_module(pretrained_model_name_or_path, module_file, class_name, cache_dir, force_download, proxies, token, revision, local_files_only, **kwargs)
File ~/.local/lib/python3.11/site-packages/diffusers/utils/dynamic_modules_utils.py:166, in get_class_in_module(class_name, module_path)

AttributeError: module 'diffusers_modules.local.pipeline_waifu' has no attribute 'WaifuPipeline'
```

Second cell (source only, never executed) recording the expected pipeline printout:

```
SanaPipeline {
  "_class_name": "SanaPipeline",
  "_diffusers_version": "0.32.0.dev0",
  "_name_or_path": "AiArtLab/waifu-2b",
  "scheduler": [
    "diffusers",
    "FlowMatchEulerDiscreteScheduler"
  ],
  "text_encoder": [
    "transformers",
    "XLMRobertaModel"
  ],
  "tokenizer": [
    "transformers",
    "XLMRobertaTokenizerFast"
  ],
  "transformer": [
    "diffusers",
    "SanaTransformer2DModel"
  ],
  "vae": [
    "diffusers",
    "AutoencoderKL"
  ]
}
```
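The AttributeError comes from the dynamic-module lookup: model_index.json (below) names the custom class as `["pipeline_waifu", "WaifuPipeline"]`, so diffusers imports pipeline_waifu.py from the repo and calls `getattr(module, "WaifuPipeline")`; this checkpoint's copy of the module evidently did not expose that class yet. A minimal sketch of the intended call once the class resolves (same arguments as the failing cell):

```py
from diffusers import DiffusionPipeline

# trust_remote_code=True lets diffusers execute pipeline_waifu.py from the repo
# and instantiate the WaifuPipeline class that model_index.json points to
pipe = DiffusionPipeline.from_pretrained(
    "AiArtLab/waifu-2b",
    variant="fp16",
    trust_remote_code=True,
)
print(pipe)
```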
.ipynb_checkpoints/model_index-checkpoint.json
DELETED
@@ -1,25 +0,0 @@

```json
{
  "_class_name": ["pipeline_waifu", "WaifuPipeline"],
  "_diffusers_version": "0.32.0.dev0",
  "_name_or_path": "AiArtLab/waifu-2b",
  "scheduler": [
    "diffusers",
    "FlowMatchEulerDiscreteScheduler"
  ],
  "text_encoder": [
    "transformers",
    "XLMRobertaModel"
  ],
  "tokenizer": [
    "transformers",
    "XLMRobertaTokenizerFast"
  ],
  "transformer": [
    "diffusers",
    "SanaTransformer2DModel"
  ],
  "vae": [
    "diffusers",
    "AutoencoderKL"
  ]
}
```
.ipynb_checkpoints/pipeline_waifu-checkpoint.py
DELETED
@@ -1,641 +0,0 @@

# Copyright 2024 PixArt-Sigma Authors and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import inspect
from typing import Callable, Dict, List, Optional, Union

import numpy as np
import PIL.Image
import torch
from diffusers import DiffusionPipeline, FlowMatchEulerDiscreteScheduler, SanaTransformer2DModel
from diffusers.image_processor import PixArtImageProcessor
from diffusers.models import AutoencoderKL
from diffusers.utils.torch_utils import randn_tensor
from transformers import XLMRobertaTokenizerFast, XLMRobertaModel


EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        >>> import torch
        >>> from diffusers import WaifuPipeline

        >>> pipe = WaifuPipeline.from_pretrained(
        ...     "AiArtLab/waifu-2b"
        ... )
        >>> pipe.to("cuda")

        >>> image = pipe(prompt='a cyberpunk cat with a neon sign that says "Sana"')[0]
        >>> image[0].save("output.png")
        ```
"""


# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps
def retrieve_timesteps(
    scheduler,
    num_inference_steps: Optional[int] = None,
    device: Optional[Union[str, torch.device]] = None,
    timesteps: Optional[List[int]] = None,
    sigmas: Optional[List[float]] = None,
    **kwargs,
):
    r"""
    Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles
    custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`.

    Args:
        scheduler (`SchedulerMixin`):
            The scheduler to get timesteps from.
        num_inference_steps (`int`):
            The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps`
            must be `None`.
        device (`str` or `torch.device`, *optional*):
            The device to which the timesteps should be moved. If `None`, the timesteps are not moved.
        timesteps (`List[int]`, *optional*):
            Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed,
            `num_inference_steps` and `sigmas` must be `None`.
        sigmas (`List[float]`, *optional*):
            Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed,
            `num_inference_steps` and `timesteps` must be `None`.

    Returns:
        `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the
        second element is the number of inference steps.
    """
    if timesteps is not None and sigmas is not None:
        raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values")
    if timesteps is not None:
        accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
        if not accepts_timesteps:
            raise ValueError(
                f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
                f" timestep schedules. Please check whether you are using the correct scheduler."
            )
        scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
        timesteps = scheduler.timesteps
        num_inference_steps = len(timesteps)
    elif sigmas is not None:
        accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
        if not accept_sigmas:
            raise ValueError(
                f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
                f" sigmas schedules. Please check whether you are using the correct scheduler."
            )
        scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs)
        timesteps = scheduler.timesteps
        num_inference_steps = len(timesteps)
    else:
        scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
        timesteps = scheduler.timesteps
    return timesteps, num_inference_steps
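
# Illustration (a sketch, using only the FlowMatchEulerDiscreteScheduler imported
# above): the three schedule arguments are mutually exclusive, and the step count
# is inferred from a custom schedule when one is given.
#
#   scheduler = FlowMatchEulerDiscreteScheduler(shift=1.0)
#   ts, n = retrieve_timesteps(scheduler, num_inference_steps=20)         # default spacing
#   ts, n = retrieve_timesteps(scheduler, sigmas=[1.0, 0.75, 0.5, 0.25])  # n inferred from sigmas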


class WaifuPipeline(DiffusionPipeline):
    r"""
    Pipeline for text-to-image generation using [Sana](https://huggingface.co/papers/2410.10629).
    """

    model_cpu_offload_seq = "text_encoder->transformer->vae"
    _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"]

    def __init__(
        self,
        tokenizer: XLMRobertaTokenizerFast,
        text_encoder: XLMRobertaModel,
        vae: AutoencoderKL,
        transformer: SanaTransformer2DModel,
        scheduler: FlowMatchEulerDiscreteScheduler,
    ):
        super().__init__()

        self.register_modules(
            tokenizer=tokenizer, text_encoder=text_encoder, vae=vae, transformer=transformer, scheduler=scheduler
        )

        self.vae_scale_factor = 8
        self.image_processor = PixArtImageProcessor(vae_scale_factor=self.vae_scale_factor)

    def encode_prompt(
        self,
        prompt: Union[str, List[str]],
        do_classifier_free_guidance: bool = True,
        negative_prompt: str = "",
        num_images_per_prompt: int = 1,
        device: Optional[torch.device] = None,
        prompt_embeds: Optional[torch.Tensor] = None,
        negative_prompt_embeds: Optional[torch.Tensor] = None,
        prompt_attention_mask: Optional[torch.Tensor] = None,
        negative_prompt_attention_mask: Optional[torch.Tensor] = None,
        max_sequence_length: int = 512,
    ):
        r"""
        Encodes the prompt into text encoder hidden states.

        Args:
            prompt (`str` or `List[str]`, *optional*):
                prompt to be encoded
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds`
                instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). For
                PixArt-Alpha, this should be "".
            do_classifier_free_guidance (`bool`, *optional*, defaults to `True`):
                whether to use classifier-free guidance or not
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                number of images that should be generated per prompt
            device: (`torch.device`, *optional*):
                torch device to place the resulting embeddings on
            prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                provided, text embeddings will be generated from the `prompt` input argument.
            negative_prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated negative text embeddings. For Sana, these should be the embeddings of the "" string.
            max_sequence_length (`int`, defaults to 512): Maximum sequence length to use for the prompt.
        """

        if device is None:
            device = self._execution_device

        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        if self.tokenizer is not None:
            self.tokenizer.padding_side = "right"

        max_length = max_sequence_length
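        # indices that keep the first position (BOS) plus the last
        # max_length - 1 positions of the padded sequence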
        select_index = [0] + list(range(-max_length + 1, 0))

        if prompt_embeds is None:
            prompt = self._text_preprocessing(prompt)

            max_length_all = max_length

            text_inputs = self.tokenizer(
                prompt,
                padding="max_length",
                max_length=max_length_all,
                truncation=True,
                add_special_tokens=True,
                return_tensors="pt",
            )
            text_input_ids = text_inputs.input_ids

            prompt_attention_mask = text_inputs.attention_mask
            prompt_attention_mask = prompt_attention_mask.to(device)

            prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=prompt_attention_mask)
            prompt_embeds = prompt_embeds[0][:, select_index]
            prompt_attention_mask = prompt_attention_mask[:, select_index]

        if self.transformer is not None:
            dtype = self.transformer.dtype
        elif self.text_encoder is not None:
            dtype = self.text_encoder.dtype
        else:
            dtype = None

        prompt_embeds = prompt_embeds.to(dtype=dtype, device=device)

        bs_embed, seq_len, _ = prompt_embeds.shape
        # duplicate text embeddings and attention mask for each generation per prompt, using mps friendly method
        prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
        prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
        prompt_attention_mask = prompt_attention_mask.view(bs_embed, -1)
        prompt_attention_mask = prompt_attention_mask.repeat(num_images_per_prompt, 1)

        # get unconditional embeddings for classifier-free guidance
        if do_classifier_free_guidance and negative_prompt_embeds is None:
            uncond_tokens = [negative_prompt] * batch_size if isinstance(negative_prompt, str) else negative_prompt
            uncond_tokens = self._text_preprocessing(uncond_tokens)
            max_length = prompt_embeds.shape[1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_attention_mask=True,
                add_special_tokens=True,
                return_tensors="pt",
            )
            negative_prompt_attention_mask = uncond_input.attention_mask
            negative_prompt_attention_mask = negative_prompt_attention_mask.to(device)

            negative_prompt_embeds = self.text_encoder(
                uncond_input.input_ids.to(device), attention_mask=negative_prompt_attention_mask
            )
            negative_prompt_embeds = negative_prompt_embeds[0]

        if do_classifier_free_guidance:
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]

            negative_prompt_embeds = negative_prompt_embeds.to(dtype=dtype, device=device)

            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)

            negative_prompt_attention_mask = negative_prompt_attention_mask.view(bs_embed, -1)
            negative_prompt_attention_mask = negative_prompt_attention_mask.repeat(num_images_per_prompt, 1)
        else:
            negative_prompt_embeds = None
            negative_prompt_attention_mask = None
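
        # shapes at this point (sketch): prompt_embeds is
        # (batch_size * num_images_per_prompt, max_sequence_length, hidden_dim);
        # the negative_* outputs mirror it under classifier-free guidance,
        # otherwise they are None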
        return prompt_embeds, prompt_attention_mask, negative_prompt_embeds, negative_prompt_attention_mask

    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
    def prepare_extra_step_kwargs(self, generator, eta):
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]

        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        # check if the scheduler accepts generator
        accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
        if accepts_generator:
            extra_step_kwargs["generator"] = generator
        return extra_step_kwargs

    def check_inputs(
        self,
        prompt,
        height,
        width,
        callback_on_step_end_tensor_inputs=None,
        negative_prompt=None,
        prompt_embeds=None,
        negative_prompt_embeds=None,
        prompt_attention_mask=None,
        negative_prompt_attention_mask=None,
    ):
        if height % 64 != 0 or width % 64 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 64 but are {height} and {width}.")

        if callback_on_step_end_tensor_inputs is not None and not all(
            k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
        ):
            raise ValueError(
                f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
            )

        if prompt is not None and prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
                " only forward one of the two."
            )
        elif prompt is None and prompt_embeds is None:
            raise ValueError(
                "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
            )
        elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if prompt is not None and negative_prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `prompt`: {prompt} and `negative_prompt_embeds`:"
                f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
            )

        if negative_prompt is not None and negative_prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
                f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
            )

        if prompt_embeds is not None and prompt_attention_mask is None:
            raise ValueError("Must provide `prompt_attention_mask` when specifying `prompt_embeds`.")

        if negative_prompt_embeds is not None and negative_prompt_attention_mask is None:
            raise ValueError("Must provide `negative_prompt_attention_mask` when specifying `negative_prompt_embeds`.")

        if prompt_embeds is not None and negative_prompt_embeds is not None:
            if prompt_embeds.shape != negative_prompt_embeds.shape:
                raise ValueError(
                    "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
                    f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
                    f" {negative_prompt_embeds.shape}."
                )
            if prompt_attention_mask.shape != negative_prompt_attention_mask.shape:
                raise ValueError(
                    "`prompt_attention_mask` and `negative_prompt_attention_mask` must have the same shape when passed directly, but"
                    f" got: `prompt_attention_mask` {prompt_attention_mask.shape} != `negative_prompt_attention_mask`"
                    f" {negative_prompt_attention_mask.shape}."
                )

    def _text_preprocessing(self, text):
        if not isinstance(text, (tuple, list)):
            text = [text]

        def process(text: str):
            text = text.lower().strip()
            return text

        return [process(t) for t in text]

    def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
        if latents is not None:
            return latents.to(device=device, dtype=dtype)

        shape = (
            batch_size,
            num_channels_latents,
            int(height) // self.vae_scale_factor,
            int(width) // self.vae_scale_factor,
        )
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )
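
        # latents live at 1/8 of the pixel resolution (vae_scale_factor = 8):
        # (batch_size, num_channels_latents, height // 8, width // 8)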
        latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        return latents

    @property
    def guidance_scale(self):
        return self._guidance_scale

    @property
    def do_classifier_free_guidance(self):
        return self._guidance_scale > 1.0

    @property
    def num_timesteps(self):
        return self._num_timesteps

    @property
    def interrupt(self):
        return self._interrupt

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]] = None,
        negative_prompt: str = "",
        num_inference_steps: int = 20,
        timesteps: List[int] = None,
        sigmas: List[float] = None,
        guidance_scale: float = 4.5,
        num_images_per_prompt: Optional[int] = 1,
        height: int = 512,
        width: int = 512,
        eta: float = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.Tensor] = None,
        prompt_embeds: Optional[torch.Tensor] = None,
        prompt_attention_mask: Optional[torch.Tensor] = None,
        negative_prompt_embeds: Optional[torch.Tensor] = None,
        negative_prompt_attention_mask: Optional[torch.Tensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = False,
        callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
        callback_on_step_end_tensor_inputs: List[str] = ["latents"],
        max_sequence_length: int = 512,
    ) -> Union[List[PIL.Image.Image], np.ndarray]:
        """
        Function invoked when calling the pipeline for generation.

        Args:
            prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`
                instead.
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. If not defined, one has to pass
                `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
                less than `1`).
            num_inference_steps (`int`, *optional*, defaults to 20):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            timesteps (`List[int]`, *optional*):
                Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument
                in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is
                passed will be used. Must be in descending order.
            sigmas (`List[float]`, *optional*):
                Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in
                their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed
                will be used.
            guidance_scale (`float`, *optional*, defaults to 4.5):
                Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
                `guidance_scale` is defined as `w` of equation 2. of [Imagen
                Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
                1`. A higher guidance scale encourages images that are closely linked to the text `prompt`,
                usually at the expense of lower image quality.
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                The number of images to generate per prompt.
            height (`int`, *optional*, defaults to 512):
                The height in pixels of the generated image.
            width (`int`, *optional*, defaults to 512):
                The width in pixels of the generated image.
            eta (`float`, *optional*, defaults to 0.0):
                Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
                [`schedulers.DDIMScheduler`], will be ignored for others.
            generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
                One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
                to make generation deterministic.
            latents (`torch.Tensor`, *optional*):
                Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
                tensor will be generated by sampling using the supplied random `generator`.
            prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                provided, text embeddings will be generated from the `prompt` input argument.
            prompt_attention_mask (`torch.Tensor`, *optional*): Pre-generated attention mask for text embeddings.
            negative_prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated negative text embeddings. For PixArt-Sigma this negative prompt should be "". If not
                provided, negative_prompt_embeds will be generated from the `negative_prompt` input argument.
            negative_prompt_attention_mask (`torch.Tensor`, *optional*):
                Pre-generated attention mask for negative text embeddings.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between
                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `False`):
                Whether or not to return a [`~pipelines.stable_diffusion.IFPipelineOutput`] instead of a plain tuple.
            callback_on_step_end (`Callable`, *optional*):
                A function that is called at the end of each denoising step during inference. The function is called
                with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
                callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
                `callback_on_step_end_tensor_inputs`.
            callback_on_step_end_tensor_inputs (`List`, *optional*):
                The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
                will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
                `._callback_tensor_inputs` attribute of your pipeline class.
            max_sequence_length (`int` defaults to `512`):
                Maximum sequence length to use with the `prompt`.

        Examples:

        Returns:
            By default a `tuple` whose first element is a list with the generated images; with
            `output_type="latent"` the latents are returned instead of decoded images.
        """

        # if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)):
        #     callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs

        # 1. Check inputs. Raise error if not correct
        self.check_inputs(
            prompt,
            height,
            width,
            callback_on_step_end_tensor_inputs,
            negative_prompt,
            prompt_embeds,
            negative_prompt_embeds,
            prompt_attention_mask,
            negative_prompt_attention_mask,
        )

        self._guidance_scale = guidance_scale
        self._interrupt = False

        # 2. Determine the batch size
        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        device = self._execution_device

        # 3. Encode input prompt
        (
            prompt_embeds,
            prompt_attention_mask,
            negative_prompt_embeds,
            negative_prompt_attention_mask,
        ) = self.encode_prompt(
            prompt,
            self.do_classifier_free_guidance,
            negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt,
            device=device,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            prompt_attention_mask=prompt_attention_mask,
            negative_prompt_attention_mask=negative_prompt_attention_mask,
            max_sequence_length=max_sequence_length,
        )
        if self.do_classifier_free_guidance:
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
            prompt_attention_mask = torch.cat([negative_prompt_attention_mask, prompt_attention_mask], dim=0)

        # 4. Prepare timesteps
        timesteps, num_inference_steps = retrieve_timesteps(
            self.scheduler, num_inference_steps, device, timesteps, sigmas
        )

        # 5. Prepare latents.
        latent_channels = self.transformer.config.in_channels
        latents = self.prepare_latents(
            batch_size * num_images_per_prompt,
            latent_channels,
            height,
            width,
            torch.float32,
            device,
            generator,
            latents,
        )

        # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
        extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)

        # 7. Denoising loop
        num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)
        self._num_timesteps = len(timesteps)

        with self.progress_bar(total=num_inference_steps) as progress_bar:
            for i, t in enumerate(timesteps):
                if self.interrupt:
                    continue

                latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents
                latent_model_input = latent_model_input.to(prompt_embeds.dtype)

                # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
                timestep = t.expand(latent_model_input.shape[0]).to(latents.dtype)

                # predict noise model_output
                noise_pred = self.transformer(
                    latent_model_input,
                    encoder_hidden_states=prompt_embeds,
                    encoder_attention_mask=prompt_attention_mask,
                    timestep=timestep,
                    return_dict=False,
                )[0]
                noise_pred = noise_pred.float()

                # perform guidance
                if self.do_classifier_free_guidance:
                    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                    noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

                # learned sigma: keep only the noise-prediction half of the channels
                if self.transformer.config.out_channels // 2 == latent_channels:
                    noise_pred = noise_pred.chunk(2, dim=1)[0]

                # compute previous image: x_t -> x_t-1
                latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]

                if callback_on_step_end is not None:
                    callback_kwargs = {}
                    for k in callback_on_step_end_tensor_inputs:
                        callback_kwargs[k] = locals()[k]
                    callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)

                    latents = callback_outputs.pop("latents", latents)
                    prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
                    negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)

                if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
                    progress_bar.update()

        if output_type == "latent":
            image = latents
        else:
            latents = latents.to(self.vae.dtype)
            image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
            image = self.image_processor.postprocess(image, output_type=output_type)

        # Offload all models
        self.maybe_free_model_hooks()

        if not return_dict:
            return (image,)

        return image
.ipynb_checkpoints/test-checkpoint.ipynb
DELETED
The diff for this file is too large to render.

.ipynb_checkpoints/waifu-checkpoint.png
DELETED
Binary file (439 kB)

__pycache__/pipeline_waifu.cpython-310.pyc
DELETED
Binary file (20.3 kB)

__pycache__/pipeline_waifu.cpython-311.pyc
DELETED
Binary file (14.6 kB)