HussainLatiff committed on
Commit
9c3daa1
·
verified ·
1 Parent(s): 3968373

Update app.py

Browse files

Revert previous change

Files changed (1) hide show
  1. app.py +11 -13
app.py CHANGED
@@ -1,19 +1,17 @@
1
- import gradio as gr
2
- import os
3
 
4
- def video_identity(video, text):
5
- # You can use both the video and the text input in your function
6
- return video, text
 
 
 
7
 
8
  demo = gr.Interface(video_identity,
9
- [gr.Video(), gr.Textbox(label="Text 2", info="Text to compare", lines=3, value="")],
10
  "playable_video",
11
- examples=[[
12
- os.path.join(os.path.dirname(__file__), "video/test_video.mp4"),
13
- ""
14
- ]],
15
- cache_examples=True
16
- )
17
 
18
  if __name__ == "__main__":
19
- demo.launch()
 
1
+ def video_identity(video):
2
+ return video
3
 
4
+ instructions = """
5
+ <b>Instructions:</b><br>
6
+ Step 1: Upload the example video to get the relevant timeframes that require haptics, the text query should be 'explosion' <a href="https://portal.vision.cognitive.azure.com/demo/video-summary-and-frame-locator">Azure Cognitive Services Video Summary and Frame Locator</a> with explosions as the query.<br>
7
+ Step 2: Download the generated audio from <a href="https://phonebrrdemonstration2.blob.core.windows.net/audio3second0001/3_second_explosion_00001.flac">this ai-generated haptic audio</a>.
8
+ Step 3: Mix the Audio using any app of your choice and master the audio with <a href="https://aimastering.com/">ai-mastering program</a>
9
+ """
10
 
11
  demo = gr.Interface(video_identity,
12
+ gr.Video(),
13
  "playable_video",
14
+ cache_examples=True)
 
 
 
 
 
15
 
16
  if __name__ == "__main__":
17
+ demo.launch(share=True)