Ammar Vohra committed on
Commit
c5a4eb1
·
1 Parent(s): 16fce1b

adjusted the model

Browse files
Files changed (2) hide show
  1. app.py +7 -7
  2. requirements.txt +2 -1
app.py CHANGED
@@ -3,21 +3,21 @@ import numpy as np
3
  import random
4
  import spaces
5
  import torch
6
- from diffusers import DiffusionPipeline, FlowMatchEulerDiscreteScheduler, AutoencoderTiny, AutoencoderKL
7
- from transformers import CLIPTextModel, CLIPTokenizer,T5EncoderModel, T5TokenizerFast
8
  from live_preview_helpers import calculate_shift, retrieve_timesteps, flux_pipe_call_that_returns_an_iterable_of_images
9
  from huggingface_hub import login
10
  import os
11
  import tempfile
 
12
 
13
  login(token=os.getenv('hf_token'))
14
  dtype = torch.bfloat16
15
  device = "cuda" if torch.cuda.is_available() else "cpu"
16
 
17
- taef1 = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=dtype).to(device)
18
- good_vae = AutoencoderKL.from_pretrained("black-forest-labs/FLUX.1-dev", subfolder="vae", torch_dtype=dtype).to(device)
19
- pipe = DiffusionPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=dtype, vae=taef1).to(device)
20
- pipe.load_lora_weights("gokaygokay/Flux-Seamless-Texture-LoRA", weight_name="seamless_texture.safetensors", low_cpu_mem_usage=True)
21
  torch.cuda.empty_cache()
22
 
23
  MAX_SEED = np.iinfo(np.int32).max
@@ -40,7 +40,7 @@ def infer(prompt, seed=42, randomize_seed=False, width=1024, height=1024, guidan
40
  height=height,
41
  generator=generator,
42
  output_type="pil",
43
- good_vae=good_vae,
44
  ):
45
  final_img = img
46
 
 
3
  import random
4
  import spaces
5
  import torch
6
+ from diffusers import DiffusionPipeline, FlowMatchEulerDiscreteScheduler, AutoencoderTiny, AutoencoderKL
7
+ from transformers import CLIPTextModel, CLIPTokenizer, T5EncoderModel, T5TokenizerFast
8
  from live_preview_helpers import calculate_shift, retrieve_timesteps, flux_pipe_call_that_returns_an_iterable_of_images
9
  from huggingface_hub import login
10
  import os
11
  import tempfile
12
+ from peft import PeftModel
13
 
14
  login(token=os.getenv('hf_token'))
15
  dtype = torch.bfloat16
16
  device = "cuda" if torch.cuda.is_available() else "cpu"
17
 
18
+ vae = AutoencoderKL.from_pretrained("black-forest-labs/FLUX.1-dev", subfolder="vae", torch_dtype=dtype).to(device)
19
+ pipe = DiffusionPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=dtype, vae=vae).to(device)
20
+ pipe.load_lora_weights("gokaygokay/Flux-Seamless-Texture-LoRA", weight_name="seamless_texture.safetensors")
 
21
  torch.cuda.empty_cache()
22
 
23
  MAX_SEED = np.iinfo(np.int32).max
 
40
  height=height,
41
  generator=generator,
42
  output_type="pil",
43
+ good_vae=vae,
44
  ):
45
  final_img = img
46
 
requirements.txt CHANGED
@@ -1,3 +1,5 @@
 
 
1
  gradio==4.29.0 # Explicitly pin Gradio to a known stable version
2
  accelerate
3
  diffusers
@@ -5,7 +7,6 @@ torch
5
  numpy
6
  transformers
7
  xformers
8
- git+https://github.com/huggingface/diffusers.git
9
  sentencepiece
10
  scipy
11
  pydantic==2.10.6 # Pinning pydantic to avoid potential conflicts with Gradio
 
1
+ git+https://github.com/huggingface/diffusers.git
2
+ git+https://github.com/huggingface/peft.git
3
  gradio==4.29.0 # Explicitly pin Gradio to a known stable version
4
  accelerate
5
  diffusers
 
7
  numpy
8
  transformers
9
  xformers
 
10
  sentencepiece
11
  scipy
12
  pydantic==2.10.6 # Pinning pydantic to avoid potential conflicts with Gradio