jiuface committed on
Commit
480f77e
·
verified ·
1 Parent(s): 0911836

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +7 -10
app.py CHANGED
@@ -27,6 +27,9 @@ from huggingface_hub import hf_hub_download
27
  from diffusers.quantizers import PipelineQuantizationConfig
28
  from diffusers import (FluxPriorReduxPipeline, FluxInpaintPipeline, FluxFillPipeline, FluxKontextPipeline, FluxPipeline)
29
 
 
 
 
30
  # Login Hugging Face Hub
31
  HF_TOKEN = os.environ.get("HF_TOKEN")
32
  login(token=HF_TOKEN)
@@ -36,16 +39,10 @@ import diffusers
36
  dtype = torch.bfloat16
37
  device = "cuda:0"
38
 
39
- base_model = "black-forest-labs/FLUX.1-Krea-dev"
40
-
41
- # pipeline_quant_config = PipelineQuantizationConfig(
42
- # quant_backend="bitsandbytes_4bit",
43
- # quant_kwargs={"load_in_4bit": True, "bnb_4bit_quant_type": "nf4", "bnb_4bit_compute_dtype": torch.bfloat16},
44
- # components_to_quantize=["transformer", "text_encoder_2"],
45
- # )
46
-
47
- txt2img_pipe = FluxKontextPipeline.from_pretrained(base_model, torch_dtype=dtype)
48
- txt2img_pipe = txt2img_pipe.to(device)
49
 
50
  MAX_SEED = 2**32 - 1
51
 
 
27
  from diffusers.quantizers import PipelineQuantizationConfig
28
  from diffusers import (FluxPriorReduxPipeline, FluxInpaintPipeline, FluxFillPipeline, FluxKontextPipeline, FluxPipeline)
29
 
30
+ from diffusers import DiffusionPipeline, FlowMatchEulerDiscreteScheduler, AutoencoderTiny, AutoencoderKL
31
+ from transformers import CLIPTextModel, CLIPTokenizer,T5EncoderModel, T5TokenizerFast
32
+
33
  # Login Hugging Face Hub
34
  HF_TOKEN = os.environ.get("HF_TOKEN")
35
  login(token=HF_TOKEN)
 
39
  dtype = torch.bfloat16
40
  device = "cuda:0"
41
 
42
+ taef1 = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=dtype).to(device)
43
+ good_vae = AutoencoderKL.from_pretrained("black-forest-labs/FLUX.1-dev", subfolder="vae", torch_dtype=dtype).to(device)
44
+ txt2img_pipe = DiffusionPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=dtype, vae=taef1).to(device)
45
+ torch.cuda.empty_cache()
 
 
 
 
 
 
46
 
47
  MAX_SEED = 2**32 - 1
48