Update app.py
app.py CHANGED
```diff
@@ -42,24 +42,6 @@ h1 {
 }
 '''
 
-def progress_bar_html(label):
-    """Returns an HTML snippet with a label and an animated thin progress bar."""
-    return f"""
-    <div style="display: flex; align-items: center;">
-        <span style="margin-right: 10px;">{label}</span>
-        <div style="position: relative; width: 110px; height: 5px; background-color: #e0e0e0; border-radius: 2.5px; overflow: hidden;">
-            <div style="width: 100%; height: 100%; background-color: #90ee90; animation: progressAnimation 2s infinite;"></div>
-        </div>
-        <style>
-        @keyframes progressAnimation {{
-            0% {{ opacity: 1; }}
-            50% {{ opacity: 0.5; }}
-            100% {{ opacity: 1; }}
-        }}
-        </style>
-    </div>
-    """
-
 MAX_MAX_NEW_TOKENS = 2048
 DEFAULT_MAX_NEW_TOKENS = 1024
 MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))
```
```diff
@@ -186,6 +168,7 @@ def generate_image_fn(
         batch_options["prompt"] = options["prompt"][i:i+BATCH_SIZE]
         if "negative_prompt" in batch_options and batch_options["negative_prompt"] is not None:
             batch_options["negative_prompt"] = options["negative_prompt"][i:i+BATCH_SIZE]
+        # Wrap the pipeline call in autocast if using CUDA
         if device.type == "cuda":
             with torch.autocast("cuda", dtype=torch.float16):
                 outputs = sd_pipe(**batch_options)
```
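For context on the surrounding loop: the `options["prompt"][i:i+BATCH_SIZE]` slices in this hunk are the standard chunking idiom for batching a prompt list. A minimal sketch of just that pattern (the values here are hypothetical, not from app.py):

```python
BATCH_SIZE = 2  # hypothetical value for illustration
prompts = ["a cat", "a dog", "a fox"]
for i in range(0, len(prompts), BATCH_SIZE):
    batch = prompts[i:i + BATCH_SIZE]  # the final batch may be shorter
    print(batch)
# ['a cat', 'a dog']
# ['a fox']
```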
```diff
@@ -214,12 +197,35 @@ def generate(
     text = input_dict["text"]
     files = input_dict.get("files", [])
 
+    # Define an HTML template for the animated progress bar.
+    # The bar is a thin 5px line in light green with a simple opacity animation.
+    progress_bar_html = """
+    <div style="display: flex; align-items: center;">
+        <span>{message}</span>
+        <div style="flex-grow: 1; margin-left: 10px;">
+            <div class="progress-bar"></div>
+        </div>
+    </div>
+    <style>
+    .progress-bar {{
+        width: 100%;
+        height: 5px;
+        background: lightgreen;
+        animation: progressAnim 2s infinite;
+    }}
+    @keyframes progressAnim {{
+        0% {{ opacity: 0.5; }}
+        50% {{ opacity: 1; }}
+        100% {{ opacity: 0.5; }}
+    }}
+    </style>
+    """
+
     if text.strip().lower().startswith("@image"):
-        # Remove the "@image" tag and use the rest as prompt
+        # Remove the "@image" tag and use the rest as prompt.
         prompt = text[len("@image"):].strip()
-        #
-
-        yield gr.HTML(progress_html)
+        # Yield progress bar for image generation.
+        yield gr.HTML(progress_bar_html.format(message="Generating Image..."))
         image_paths, used_seed = generate_image_fn(
             prompt=prompt,
             negative_prompt="",
```
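Note the doubled braces in the new template's CSS: since `progress_bar_html` is a plain string later rendered with `.format(message=...)`, literal `{` and `}` must be escaped as `{{` and `}}` so that only the `{message}` placeholder is substituted. A minimal illustration of that escaping rule:

```python
template = "<style>.bar {{ height: 5px; }}</style><span>{message}</span>"
print(template.format(message="Thinking..."))
# <style>.bar { height: 5px; }</style><span>Thinking...</span>
```

The deleted module-level helper achieved the same effect with an f-string, where single braces interpolate immediately and doubled braces likewise stay literal.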
```diff
@@ -233,8 +239,7 @@ def generate(
             use_resolution_binning=True,
             num_images=1,
         )
-        #
-        yield gr.HTML.update(value="")
+        # Once the image is generated, yield the image (thus replacing the progress bar).
         yield gr.Image(image_paths[0])
         return  # Exit early
 
```
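The comment in this hunk leans on gr.ChatInterface's generator semantics: each successive yield replaces the previously rendered response, which is what clears the progress bar once the image is ready. A minimal standalone sketch of that behavior (not the app's code):

```python
import time
import gradio as gr

def chat_fn(message, history):
    # Each yield overwrites the response rendered so far.
    yield gr.HTML("<em>working...</em>")
    time.sleep(1)
    yield "done"

gr.ChatInterface(chat_fn).launch()
```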
```diff
@@ -275,16 +280,21 @@ def generate(
         thread = Thread(target=model_m.generate, kwargs=generation_kwargs)
         thread.start()
 
-        #
-
-        yield gr.HTML(progress_html)
+        # Yield progress bar for multimodal input processing.
+        yield gr.HTML(progress_bar_html.format(message="Thinking..."))
         buffer = ""
         for new_text in streamer:
             buffer += new_text
             buffer = buffer.replace("<|im_end|>", "")
             time.sleep(0.01)
-
-
+            # During streaming, update the progress UI (progress bar remains visible).
+            combined_html = f"""
+            <div style="display: flex; flex-direction: column;">
+                {progress_bar_html.format(message="Thinking...")}
+                <div style="margin-top: 10px;">{buffer}</div>
+            </div>
+            """
+            yield gr.HTML(combined_html)
     else:
         input_ids = tokenizer.apply_chat_template(conversation, add_generation_prompt=True, return_tensors="pt")
         if input_ids.shape[1] > MAX_INPUT_TOKEN_LENGTH:
```
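For reference, the `Thread` plus `streamer` pairing in this hunk is the usual transformers `TextIteratorStreamer` pattern: `generate()` runs on a background thread while the main thread iterates over decoded text chunks as they arrive. A minimal sketch assuming a causal LM and tokenizer are already loaded (the names are placeholders, not app.py's):

```python
from threading import Thread
from transformers import TextIteratorStreamer

streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
inputs = tokenizer("Hello", return_tensors="pt")
generation_kwargs = dict(**inputs, streamer=streamer, max_new_tokens=64)

Thread(target=model.generate, kwargs=generation_kwargs).start()
buffer = ""
for new_text in streamer:  # blocks until the next decoded chunk is ready
    buffer += new_text
```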
```diff
@@ -303,23 +313,28 @@ def generate(
             "num_beams": 1,
             "repetition_penalty": repetition_penalty,
         }
-
-
+        t = Thread(target=model.generate, kwargs=generation_kwargs)
+        t.start()
 
-        #
-
-
-        buffer = ""
+        # Yield initial progress bar for text generation.
+        yield gr.HTML(progress_bar_html.format(message="Thinking..."))
+        outputs = []
         for new_text in streamer:
-
-
-
-
-
+            outputs.append(new_text)
+            combined_html = f"""
+            <div style="display: flex; flex-direction: column;">
+                {progress_bar_html.format(message="Thinking...")}
+                <div style="margin-top: 10px;">{''.join(outputs)}</div>
+            </div>
+            """
+            yield gr.HTML(combined_html)
+        final_response = "".join(outputs)
+        # Final response: progress bar is removed and only the generated text is shown.
+        yield final_response
 
     # If TTS was requested, convert the final response to speech.
     if is_tts and voice:
-        output_file = asyncio.run(text_to_speech(
+        output_file = asyncio.run(text_to_speech(final_response, voice))
         yield gr.Audio(output_file, autoplay=True)
 
 demo = gr.ChatInterface(
```
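The `asyncio.run(...)` call bridges the synchronous generator to the app's async `text_to_speech` helper, blocking until the coroutine returns the audio file path. A sketch of that bridge with a stand-in coroutine (the real helper's body is not shown in this diff):

```python
import asyncio

async def text_to_speech(text: str, voice: str) -> str:
    # Stand-in body; the real helper synthesizes speech and returns a file path.
    await asyncio.sleep(0)
    return "output.mp3"

output_file = asyncio.run(text_to_speech("Hello there", "voice-id"))
print(output_file)  # output.mp3
```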