Files changed (1) hide show
  1. app.py +11 -14
app.py CHANGED
@@ -1,22 +1,19 @@
1
  import gradio as gr
2
  import os
3
 
4
- def video_identity(video):
5
- return video
6
-
7
- instructions = """
8
- <b>Instructions:</b><br>
9
- Step 1: Upload the example video to get the relevant timeframes that require haptics, the text query should be 'explosion' <a href="https://portal.vision.cognitive.azure.com/demo/video-summary-and-frame-locator">Azure Cognitive Services Video Summary and Frame Locator</a> with explosions as the query.<br>
10
- Step 2: Download the generated audio from <a href="https://phonebrrdemonstration2.blob.core.windows.net/audio3second0001/3_second_explosion_00001.flac">this ai-generated haptic audio</a>.
11
- Step 3: Mix the Audio using any app of your choice and master the audio with <a href="https://aimastering.com/">ai-mastering program</a>
12
- """
13
 
14
  demo = gr.Interface(video_identity,
15
- gr.Video(),
16
  "playable_video",
17
- examples=[os.path.join(os.path.dirname(__file__),
18
- "video/test_video.mp4")],
19
- cache_examples=True)
 
 
 
20
 
21
  if __name__ == "__main__":
22
- demo.launch(share=True)
 
1
  import gradio as gr
2
  import os
3
 
4
def video_identity(video, text):
    """Echo the uploaded video back unchanged.

    Args:
        video: Filepath of the uploaded video (supplied by the ``gr.Video``
            input component).
        text: Query text from the ``gr.Textbox`` input; accepted so the
            signature matches the two declared input components, but it is
            not part of the output.

    Returns:
        The video filepath, for the single ``"playable_video"`` output
        component.
    """
    # BUG FIX: the Interface below declares exactly ONE output component
    # ("playable_video"), so returning (video, text) makes Gradio raise an
    # output-count mismatch — and it would fail at startup, because
    # cache_examples=True runs this function on the example row.
    return video
 
 
 
 
 
 
7
 
8
# Wire video_identity to a two-input UI (video upload + free-text query)
# with a single playable-video output. The example row is pre-rendered at
# startup because example caching is enabled.
_query_box = gr.Textbox(label="Text 2", info="Text to compare", lines=3, value="")
_example_row = [
    os.path.join(os.path.dirname(__file__), "video/test_video.mp4"),
    "",
]
demo = gr.Interface(
    video_identity,
    [gr.Video(), _query_box],
    "playable_video",
    examples=[_example_row],
    cache_examples=True,
)
17
 
18
if __name__ == "__main__":
    # Serve the app locally only (no public share link).
    demo.launch()