iitolstykh committed on
Commit
816d4c8
·
verified ·
1 Parent(s): 8906f2d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +8 -9
app.py CHANGED
@@ -116,7 +116,7 @@ model = load_checkpoint_and_dispatch(
116
  dtype=torch.bfloat16,
117
  force_hooks=True,
118
  ).eval()
119
- vae_model = vae_model.to(model.device).eval()
120
 
121
 
122
  # Inferencer Preparing
@@ -179,14 +179,13 @@ def text_to_image(prompt, show_thinking=False, cfg_text_scale=4.0, cfg_interval=
179
 
180
  result = {"text": "", "image": None}
181
  # Call inferencer with or without think parameter based on user choice
182
- with torch.amp.autocast("cuda", enabled=True, dtype=torch.bfloat16):
183
- for i in inferencer(text=prompt, think=show_thinking, understanding_output=False, **inference_hyper):
184
- if type(i) == str:
185
- result["text"] += i
186
- else:
187
- result["image"] = i
188
-
189
- yield result["image"], result.get("text", None)
190
 
191
 
192
  # Image Understanding function with thinking option and hyperparameters
 
116
  dtype=torch.bfloat16,
117
  force_hooks=True,
118
  ).eval()
119
+ # vae_model = vae_model.to(model.device).eval()
120
 
121
 
122
  # Inferencer Preparing
 
179
 
180
  result = {"text": "", "image": None}
181
  # Call inferencer with or without think parameter based on user choice
182
+ for i in inferencer(text=prompt, think=show_thinking, understanding_output=False, **inference_hyper):
183
+ if type(i) == str:
184
+ result["text"] += i
185
+ else:
186
+ result["image"] = i
187
+
188
+ yield result["image"], result.get("text", None)
 
189
 
190
 
191
  # Image Understanding function with thinking option and hyperparameters