HussainLatiff committed on
Commit
014ee66
·
verified ·
1 Parent(s): ba27b4a

Added instructions to perform relevant changes

Browse files
Files changed (1) hide show
  1. app.py +8 -1
app.py CHANGED
@@ -4,6 +4,13 @@ import os
4
  def video_identity(video):
5
  return video
6
 
 
 
 
 
 
 
 
7
  demo = gr.Interface(video_identity,
8
  gr.Video(),
9
  "playable_video",
@@ -12,4 +19,4 @@ demo = gr.Interface(video_identity,
12
  cache_examples=True)
13
 
14
  if __name__ == "__main__":
15
- demo.launch()
 
4
  def video_identity(video):
5
  return video
6
 
7
+ instructions = """
8
+ <b>Instructions:</b><br>
9
+ Step 1: Upload the example video to get the relevant timeframes that require haptics, the text query should be 'explosion' <a href="https://portal.vision.cognitive.azure.com/demo/video-summary-and-frame-locator">Azure Cognitive Services Video Summary and Frame Locator</a> with explosions as the query.<br>
10
+ Step 2: Download the generated audio from <a href="https://phonebrrdemonstration2.blob.core.windows.net/audio3second0001/3_second_explosion_00001.flac">this ai-generated haptic audio</a>.
11
+ Step 3: Mix the Audio using any app of your choice and master the audio with <a href="https://aimastering.com/">ai-mastering program</a>
12
+ """
13
+
14
  demo = gr.Interface(video_identity,
15
  gr.Video(),
16
  "playable_video",
 
19
  cache_examples=True)
20
 
21
  if __name__ == "__main__":
22
+ demo.launch(share=True)