Update app.py
app.py
CHANGED
@@ -14,40 +14,40 @@ model.to("cuda:0")
 
 @spaces.GPU
 def bot_streaming(message, history):
-    [old function body not preserved in this view]
+    chat_history = []
+    if message["files"]:
+        image = message["files"][-1]["path"]
+    else:
+        for hist in history:
+            if type(hist[0]) == tuple:
+                image = hist[0][0]
+
+    if len(history) > 0 and image:
+        chat_history.append({"role": "user", "content": f'<image>\n{message["text"]}'})
+        for human, assistant in history[1:]:
+            chat_history.append({"role": "user", "content": human})
+            chat_history.append({"role": "assistant", "content": assistant})
+
+    if image is None:
+        gr.Error("You need to upload an image for LLaVA to work.")
+    prompt = f"[INST] <image>\n{message['text']} [/INST]"
+    image = Image.open(image).convert("RGB")
+    inputs = processor(prompt, image, return_tensors="pt").to("cuda:0")
+    streamer = TextIteratorStreamer(processor, **{"skip_special_tokens": True})
+    generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=100)
+    generated_text = ""
+    thread = Thread(target=model.generate, kwargs=generation_kwargs)
+    thread.start()
+    text_prompt = f"[INST] \n{message['text']} [/INST]"
+
+    buffer = ""
+    for new_text in streamer:
+        buffer += new_text
+        generated_text_without_prompt = buffer[len(text_prompt):]
+        time.sleep(0.04)
+        yield generated_text_without_prompt
 
 
 demo = gr.ChatInterface(fn=bot_streaming, title="LLaVA NeXT", examples=[{"text": "What is on the flower?", "files":["./bee.jpg"]},
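For context, the streaming half of the function follows the usual transformers pattern: model.generate blocks until generation finishes, so it runs on a Thread while TextIteratorStreamer yields decoded text incrementally to the caller. Below is a minimal, self-contained sketch of that pattern; the gpt2 checkpoint and the prompt are stand-ins for illustration, not part of this Space.

from threading import Thread
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tokenizer = AutoTokenizer.from_pretrained("gpt2")    # stand-in model, not from the commit
model = AutoModelForCausalLM.from_pretrained("gpt2")

inputs = tokenizer("What is on the flower?", return_tensors="pt")
streamer = TextIteratorStreamer(tokenizer, skip_special_tokens=True)
generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=50)

# generate() blocks until done, so it runs on a worker thread;
# the streamer is consumed incrementally on the main thread.
thread = Thread(target=model.generate, kwargs=generation_kwargs)
thread.start()

buffer = ""
for new_text in streamer:
    buffer += new_text    # accumulate partial output, as bot_streaming does
    print(buffer, flush=True)
thread.join()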