update audio case #5
by youngsheen · opened
- app.py +41 -40
- examples/{bird-twitter-car.wav → 1034346401.mp4} +2 -2
- examples/{door.of.bar.raining2.wav → Traffic and pedestrians.wav} +2 -2
- examples/{output_v_1jgsRbGzCls.mp4 → WBS4I.mp4} +2 -2
- examples/Y--ZHUMfueO0.flac +0 -0
- examples/desert.jpg +0 -0
- examples/extreme_ironing.jpg +0 -0
- examples/sample_demo_1.mp4 +3 -0
- examples/{output_v_3V9tzjyr51I.mp4 → sample_demo_3.mp4} +2 -2
- examples/sample_demo_9.mp4 +3 -0
- examples/waterview.jpg +0 -0
app.py
CHANGED
@@ -22,6 +22,8 @@ title_markdown = ("""
     <h5 style="margin: 0;">If this demo please you, please give us a star ⭐ on Github or ❤️ on this space.</h5>
   </div>
 </div>
+
+
 <div align="center">
   <div style="display:flex; gap: 0.25rem; margin-top: 10px;" align="center">
     <a href="https://github.com/DAMO-NLP-SG/VideoLLaMA2"><img src='https://img.shields.io/badge/Github-VideoLLaMA2-9C276A'></a>
@@ -95,9 +97,8 @@ class Chat:
 
 
 @spaces.GPU(duration=120)
-def generate(video, av, audio, message, chatbot, va_tag, textbox_in, temperature, top_p, max_output_tokens, dtype=torch.float16):
+def generate(image, video, audio, message, chatbot, va_tag, textbox_in, temperature, top_p, max_output_tokens, dtype=torch.float16):
     data = []
-    image = None
 
     processor = handler.processor
     try:
@@ -111,14 +112,6 @@ def generate(video, av, audio, message, chatbot, va_tag, textbox_in, temperature
         else:
             video_audio = video_audio.to(handler.model.device, dtype=dtype)
         data.append((video_audio, '<video>'))
-    elif av is not None:
-        video_audio = processor['video'](av, va=va_tag=="Audio Vision")
-        if va_tag=="Audio Vision":
-            for k,v in video_audio.items():
-                video_audio[k] = v.to(handler.model.device, dtype=dtype)
-        else:
-            video_audio = video_audio.to(handler.model.device, dtype=dtype)
-        data.append((video_audio, '<video>'))
     elif audio is not None:
         data.append((processor['audio'](audio).to(handler.model.device, dtype=dtype), '<audio>'))
     elif image is None and video is None:
@@ -134,7 +127,7 @@ def generate(video, av, audio, message, chatbot, va_tag, textbox_in, temperature
     show_images = ""
     if image is not None:
         show_images += f'<img src="./file={image}" style="display: inline-block;width: 250px;max-height: 400px;">'
-    if video is not None
+    if video is not None:
         show_images += f'<video controls playsinline width="500" style="display: inline-block;" src="./file={video}"></video>'
     if audio is not None:
         show_images += f'<audio controls style="display: inline-block;" src="./file={audio}"></audio>'
@@ -148,7 +141,6 @@ def generate(video, av, audio, message, chatbot, va_tag, textbox_in, temperature
     else:
         previous_image = re.findall(r'<img src="./file=(.+?)"', chatbot[0][0])
         previous_video = re.findall(r'<video controls playsinline width="500" style="display: inline-block;" src="./file=(.+?)"', chatbot[0][0])
-        previous_av = re.findall(r'<video controls playsinline width="500" style="display: inline-block;" src="./file=(.+?)"', chatbot[0][0])
         previous_audio = re.findall(r'<audio controls style="display: inline-block;" src="./file=(.+?)"', chatbot[0][0])
         if len(previous_image) > 0:
             previous_image = previous_image[0]
@@ -162,12 +154,6 @@ def generate(video, av, audio, message, chatbot, va_tag, textbox_in, temperature
             if video is not None and os.path.basename(previous_video) != os.path.basename(video):
                 message.clear()
                 one_turn_chat[0] += "\n" + show_images
-        elif len(previous_av) > 0:
-            previous_av = previous_av[0]
-            # 2.2 new video append or pure text input will start a new conversation
-            if av is not None and os.path.basename(previous_av) != os.path.basename(av):
-                message.clear()
-                one_turn_chat[0] += "\n" + show_images
         elif len(previous_audio) > 0:
             previous_audio = previous_audio[0]
             # 2.3 new audio append or pure text input will start a new conversation
@@ -196,7 +182,7 @@ def generate(video, av, audio, message, chatbot, va_tag, textbox_in, temperature
             one_turn_chat[1] = text_en_out
     chatbot.append(one_turn_chat)
 
-    return gr.update(value=
+    return gr.update(value=image, interactive=True), gr.update(value=video, interactive=True), gr.update(value=audio, interactive=True), message, chatbot
 
 
 def regenerate(message, chatbot):
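Taken together, the hunks above swap the separate `av` (audio-video) input back to an `image` slot: `generate` now receives `image` as a parameter instead of hard-coding `image = None`, and the dedicated `av` branch disappears because `processor['video']` already covers both cases through its `va` flag, returning per-stream tensors for "Audio Vision" and a single tensor otherwise. A minimal sketch of the resulting dispatch; the names `build_inputs` and `model_device`, and the image branch, are illustrative rather than the Space's actual code:

```python
# Sketch of the modality dispatch after this PR; `build_inputs` and
# `model_device` are illustrative names, not the Space's real code.
import torch

def build_inputs(processor, model_device, image, video, audio, va_tag, dtype=torch.float16):
    """Route at most one modality into `data`, tagged with its placeholder token."""
    data = []
    if video is not None:
        # One branch now serves both modes: with va=True the video processor
        # returns a dict of per-stream tensors, otherwise a single tensor.
        video_audio = processor['video'](video, va=(va_tag == "Audio Vision"))
        if va_tag == "Audio Vision":
            video_audio = {k: v.to(model_device, dtype=dtype) for k, v in video_audio.items()}
        else:
            video_audio = video_audio.to(model_device, dtype=dtype)
        data.append((video_audio, '<video>'))
    elif audio is not None:
        data.append((processor['audio'](audio).to(model_device, dtype=dtype), '<audio>'))
    elif image is not None:
        # Hypothetical image branch; the diff itself only shows the
        # text-only check `elif image is None and video is None`.
        data.append((processor['image'](image).to(model_device, dtype=dtype), '<image>'))
    return data
```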
@@ -240,8 +226,9 @@ with gr.Blocks(title='VideoLLaMA 2 🔥🚀🔥', theme=theme, css=block_css) as
 
     with gr.Row():
         with gr.Column(scale=3):
+            #image = gr.Image(label="Input Image", type="filepath")
+            image = None
             video = gr.Video(label="Input Video")
-            av = gr.Video(label="Input Video_Audio")
             audio = gr.Audio(label="Input Audio", type="filepath")
 
             with gr.Accordion("Parameters", open=True) as parameter_row:
@@ -257,9 +244,9 @@ with gr.Blocks(title='VideoLLaMA 2 🔥🚀🔥', theme=theme, css=block_css) as
                 va_tag = gr.Radio(choices=["Audio Vision", "Vision Only", "Audio Only"], value="Audio Vision", label="Select one")
 
                 temperature = gr.Slider(
-                    minimum=0.
+                    minimum=0.1,
                     maximum=1.0,
-                    value=0.
+                    value=0.2,
                     step=0.1,
                     interactive=True,
                     label="Temperature",
@@ -268,7 +255,7 @@ with gr.Blocks(title='VideoLLaMA 2 🔥🚀🔥', theme=theme, css=block_css) as
                 top_p = gr.Slider(
                     minimum=0.0,
                     maximum=1.0,
-                    value=0.
+                    value=0.7,
                     step=0.1,
                     interactive=True,
                     label="Top P",
@@ -300,16 +287,32 @@ with gr.Blocks(title='VideoLLaMA 2 🔥🚀🔥', theme=theme, css=block_css) as
 
     with gr.Row():
         cur_dir = os.path.dirname(os.path.abspath(__file__))
+        '''
         with gr.Column():
             gr.Examples(
                 examples=[
                     [
-                        f"{cur_dir}/examples/
-                        "What
+                        f"{cur_dir}/examples/extreme_ironing.jpg",
+                        "What happens in this image?",
                     ],
                     [
-                        f"{cur_dir}/examples/
-                        "What
+                        f"{cur_dir}/examples/waterview.jpg",
+                        "What are the things I should be cautious about when I visit here?",
+                    ],
+                ],
+                inputs=[image, textbox],
+            )
+        '''
+        with gr.Column():
+            gr.Examples(
+                examples=[
+                    [
+                        f"{cur_dir}/examples/WBS4I.mp4",
+                        "Please describe the video.",
+                    ],
+                    [
+                        f"{cur_dir}/examples/sample_demo_1.mp4",
+                        "Please describe the video.",
                     ],
                 ],
                 inputs=[video, textbox],
@@ -319,26 +322,26 @@ with gr.Blocks(title='VideoLLaMA 2 🔥🚀🔥', theme=theme, css=block_css) as
                 examples=[
                     [
                         f"{cur_dir}/examples/00000368.mp4",
-                        "
+                        "Please describe the video with audio information.",
                     ],
                     [
                         f"{cur_dir}/examples/00003491.mp4",
                         "Where is the loudest instrument?",
                     ],
                 ],
-                inputs=[
+                inputs=[video, textbox],
             )
         with gr.Column():
             # audio
             gr.Examples(
                 examples=[
                     [
-                        f"{cur_dir}/examples/
-                        "Please describe the audio
+                        f"{cur_dir}/examples/Y--ZHUMfueO0.flac",
+                        "Please describe the audio.",
                     ],
                     [
-                        f"{cur_dir}/examples/
-                        "Please describe the audio
+                        f"{cur_dir}/examples/Traffic and pedestrians.wav",
+                        "Please describe the audio.",
                     ],
                 ],
                 inputs=[audio, textbox],
@@ -349,22 +352,20 @@ with gr.Blocks(title='VideoLLaMA 2 🔥🚀🔥', theme=theme, css=block_css) as
 
     submit_btn.click(
         generate,
-        [
-        [
+        [image, video, audio, message, chatbot, va_tag, textbox, temperature, top_p, max_output_tokens],
+        [image, video, audio, message, chatbot])
 
     regenerate_btn.click(
         regenerate,
         [message, chatbot],
         [message, chatbot]).then(
         generate,
-        [
-        [
+        [image, video, audio, message, chatbot, va_tag, textbox, temperature, top_p, max_output_tokens],
+        [image, video, audio, message, chatbot])
 
     clear_btn.click(
         clear_history,
         [message, chatbot],
-        [
+        [image, video, audio, message, chatbot, textbox])
 
     demo.launch(share=False)
-
-
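The wiring hunks line the input/output lists up with the new `generate` signature: every media component goes in as an input and comes back as a `gr.update(...)` output, which is what keeps the uploaded file visible after each turn. A self-contained sketch of the same pattern with a dummy responder; the component set and function bodies here are illustrative, not the Space's code:

```python
# Sketch of the submit/clear wiring pattern, with a dummy responder
# standing in for the model call. All names are illustrative.
import gradio as gr

def generate(video, audio, chatbot, textbox_in, temperature, top_p):
    chatbot = chatbot + [[textbox_in, f"(echo at T={temperature}, top_p={top_p})"]]
    # Returning gr.update(...) per media input keeps the uploaded file in place.
    return gr.update(value=video, interactive=True), gr.update(value=audio, interactive=True), chatbot

def clear_history(chatbot):
    # One return value per output component: reset media, history, and textbox.
    return None, None, [], ""

with gr.Blocks(title="wiring demo") as demo:
    video = gr.Video(label="Input Video")
    audio = gr.Audio(label="Input Audio", type="filepath")
    chatbot = gr.Chatbot()
    textbox = gr.Textbox()
    temperature = gr.Slider(minimum=0.1, maximum=1.0, value=0.2, step=0.1, label="Temperature")
    top_p = gr.Slider(minimum=0.0, maximum=1.0, value=0.7, step=0.1, label="Top P")
    submit_btn = gr.Button("Send")
    clear_btn = gr.Button("Clear")

    submit_btn.click(
        generate,
        [video, audio, chatbot, textbox, temperature, top_p],
        [video, audio, chatbot])
    clear_btn.click(clear_history, [chatbot], [video, audio, chatbot, textbox])

if __name__ == "__main__":
    demo.launch(share=False)
```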
examples/{bird-twitter-car.wav → 1034346401.mp4}
RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:08b62a634fe49edc0a19fc53f6ea5cfb345d9b2a6a7047811344c16832dc42b2
+size 1678095

examples/{door.of.bar.raining2.wav → Traffic and pedestrians.wav}
RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:39d805c8e0e487427d60c47ded7d7cca9b8fa288c1a53c93118b15f68ecf6792
+size 1656254

examples/{output_v_1jgsRbGzCls.mp4 → WBS4I.mp4}
RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:7129dddf8da11c9296845eed65f0016dc67a503972c57500fe9f7c3ad2ee1ff3
+size 1052064

examples/Y--ZHUMfueO0.flac
ADDED
Binary file (324 kB)

examples/desert.jpg
ADDED

examples/extreme_ironing.jpg
ADDED

examples/sample_demo_1.mp4
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fc6562a172eb9cb3c760a3c9992349c1faa2c793c112b7b9e50bd5cb17c2164d
+size 1549315

examples/{output_v_3V9tzjyr51I.mp4 → sample_demo_3.mp4}
RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:da6126bce64c64a3d6f7ce889fbe15b5f1c2e3f978846351d8c7a79a950b429e
+size 463547

examples/sample_demo_9.mp4
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f9702694f185e27ae016b85024b367e140cf93a4e3124d072816fd32f2ca0d96
+size 631864

examples/waterview.jpg
ADDED
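The RENAMED and ADDED entries above are Git LFS pointer files rather than media payloads: three-line stubs recording the spec version, the blob's sha256 `oid`, and its `size` in bytes. A small sketch for checking a downloaded example against its pointer; the local path is illustrative, while the oid and size come from the WBS4I.mp4 entry above:

```python
# Verify a downloaded example file against its Git LFS pointer.
import hashlib

def lfs_fields(pointer_text: str) -> dict:
    """Parse the 'key value' lines of a git-lfs pointer file."""
    fields = {}
    for line in pointer_text.strip().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields

def matches_pointer(local_path: str, pointer_text: str) -> bool:
    fields = lfs_fields(pointer_text)
    expected_oid = fields["oid"].partition(":")[2]  # strip the "sha256:" prefix
    expected_size = int(fields["size"])
    digest, size = hashlib.sha256(), 0
    with open(local_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # 1 MiB chunks
            digest.update(chunk)
            size += len(chunk)
    return digest.hexdigest() == expected_oid and size == expected_size

pointer = """version https://git-lfs.github.com/spec/v1
oid sha256:7129dddf8da11c9296845eed65f0016dc67a503972c57500fe9f7c3ad2ee1ff3
size 1052064
"""
print(matches_pointer("examples/WBS4I.mp4", pointer))  # illustrative local path
```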