jdavis committed on
Commit
011b800
·
verified ·
1 Parent(s): beec38c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +6 -2
app.py CHANGED
@@ -28,6 +28,9 @@ os.environ["HF_HOME"] = os.path.join(os.getcwd(), ".cache/huggingface")
28
  import torch
29
  from diffusers import FluxFillPipeline
30
 
 
 
 
31
  # Constants
32
  MAX_SEED = np.iinfo(np.int32).max
33
  MAX_IMAGE_SIZE = 2048
@@ -95,11 +98,12 @@ def load_model():
95
  st.info(f"Token available: {'Yes' if token else 'No'}")
96
 
97
  try:
98
- # Use the same parameters as the Gradio version, just with token
99
  model = FluxFillPipeline.from_pretrained(
100
  "black-forest-labs/FLUX.1-Fill-dev",
101
  token=token,
102
- torch_dtype=torch.bfloat16
 
103
  )
104
  st.success("Model loaded successfully!")
105
  return model.to(device)
 
28
  import torch
29
  from diffusers import FluxFillPipeline
30
 
31
+ import warnings
32
+ warnings.filterwarnings("ignore", message=".*add_prefix_space.*")
33
+
34
  # Constants
35
  MAX_SEED = np.iinfo(np.int32).max
36
  MAX_IMAGE_SIZE = 2048
 
98
  st.info(f"Token available: {'Yes' if token else 'No'}")
99
 
100
  try:
101
+ # Pass tokenizer_config with add_prefix_space=False to address the tokenizer warning
102
  model = FluxFillPipeline.from_pretrained(
103
  "black-forest-labs/FLUX.1-Fill-dev",
104
  token=token,
105
+ torch_dtype=torch.bfloat16,
106
+ tokenizer_config={"add_prefix_space": False}
107
  )
108
  st.success("Model loaded successfully!")
109
  return model.to(device)