Spaces:
Running on Zero
Update app.py
Browse files
app.py
CHANGED
|
@@ -9,7 +9,7 @@ import logging
|
|
| 9 |
from diffusers import DiffusionPipeline, AutoencoderTiny, AutoencoderKL, AutoPipelineForImage2Image
|
| 10 |
from huggingface_hub import login
|
| 11 |
from diffusers.utils import load_image
|
| 12 |
-
from lora_loading_patch import load_lora_into_transformer
|
| 13 |
import time
|
| 14 |
from datetime import datetime
|
| 15 |
from io import BytesIO
|
|
@@ -22,6 +22,8 @@ import re
|
|
| 22 |
import json
|
| 23 |
import random
|
| 24 |
import string
|
|
|
|
|
|
|
| 25 |
|
| 26 |
# Login Hugging Face Hub
|
| 27 |
HF_TOKEN = os.environ.get("HF_TOKEN")
|
|
@@ -37,10 +39,10 @@ print(device)
|
|
| 37 |
base_model = "black-forest-labs/FLUX.1-Krea-dev"
|
| 38 |
# load pipe
|
| 39 |
|
| 40 |
-
txt2img_pipe =
|
| 41 |
|
| 42 |
txt2img_pipe = txt2img_pipe.to(device)
|
| 43 |
-
txt2img_pipe.__class__.load_lora_into_transformer = classmethod(load_lora_into_transformer)
|
| 44 |
|
| 45 |
MAX_SEED = 2**32 - 1
|
| 46 |
|
|
|
|
| 9 |
from diffusers import DiffusionPipeline, AutoencoderTiny, AutoencoderKL, AutoPipelineForImage2Image
|
| 10 |
from huggingface_hub import login
|
| 11 |
from diffusers.utils import load_image
|
| 12 |
+
#from lora_loading_patch import load_lora_into_transformer
|
| 13 |
import time
|
| 14 |
from datetime import datetime
|
| 15 |
from io import BytesIO
|
|
|
|
| 22 |
import json
|
| 23 |
import random
|
| 24 |
import string
|
| 25 |
+
from diffusers import FluxPipeline
|
| 26 |
+
|
| 27 |
|
| 28 |
# Login Hugging Face Hub
|
| 29 |
HF_TOKEN = os.environ.get("HF_TOKEN")
|
|
|
|
| 39 |
base_model = "black-forest-labs/FLUX.1-Krea-dev"
|
| 40 |
# load pipe
|
| 41 |
|
| 42 |
+
txt2img_pipe = FluxPipeline.from_pretrained(base_model, torch_dtype=dtype)
|
| 43 |
|
| 44 |
txt2img_pipe = txt2img_pipe.to(device)
|
| 45 |
+
#txt2img_pipe.__class__.load_lora_into_transformer = classmethod(load_lora_into_transformer)
|
| 46 |
|
| 47 |
MAX_SEED = 2**32 - 1
|
| 48 |
|