Banuka committed on
Commit
366a31f
·
verified ·
1 Parent(s): 838aaca

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +1 -75
app.py CHANGED
@@ -57,10 +57,7 @@ def predict_video(input_video, input_audio=None, input_choice="Explosions"):
57
  userAudioInputFlag = False
58
 
59
  if input_audio is not None:
60
- #audioFileName = "userInputAudio"
61
- #input_audio = AzureBlobStorageAudio.uploadUserAudioToBlobStorage(input_audio, audioFileName)
62
  userAudioInputFlag = True
63
- #return [input_video, f" Using uploaded audio: {audioFileName}"]
64
  else:
65
  if (input_choice == "Explosions"):
66
  input_audio = os.path.join(os.path.dirname(__file__), "audio/1_seconds_haptic_audio.mp3")
@@ -75,16 +72,6 @@ def predict_video(input_video, input_audio=None, input_choice="Explosions"):
75
  input_audio = os.path.join(os.path.dirname(__file__), "audio/5_seconds_haptic_videos.mp3")
76
  print("default selected")
77
 
78
- #return [videoBlobURL, f" Using uploaded audio:"]
79
- #return [input_video, f" Using uploaded audio: {input_audio.name}"]
80
- # IF user uploads audio file: upload audio file to blob storage
81
- # ELSE use default audio file from blob storage
82
-
83
-
84
- # message = "**Placeholder:** Video processing not implemented yet."
85
-
86
- # 2. Analyze video and predict timestamps
87
-
88
  """
89
  Processes the uploaded video (replace with your video analysis logic).
90
 
@@ -95,7 +82,6 @@ def predict_video(input_video, input_audio=None, input_choice="Explosions"):
95
  Returns:
96
  A list containing the processed video and a message string.
97
  """
98
-
99
  responseQueryText = videoAnalysis(videoSASURL, videoSASToken, input_choice)
100
 
101
  # IF method returns error: run analysis again
@@ -104,22 +90,6 @@ def predict_video(input_video, input_audio=None, input_choice="Explosions"):
104
 
105
  AzureBlobStorageVideo.delete_container('useruploadhuggingfacevideo')
106
 
107
- # 3. Use moviepy to add haptics to video
108
-
109
- #install masteringModule dependencies
110
- #os.chdir("masteringModule")
111
- #npminstall = subprocess.run(["npm", "install", "masteringModule/package.json"])
112
- #os.chdir("..")
113
-
114
- # 3.1. Extract audio from video
115
- #extractedAudioPath = extract_audio_from_video(input_video)
116
-
117
- # 3.2. Mix extracted audio with haptic audio
118
-
119
- # Load JSON output
120
- #output_query_response = '{"value":[{"documentId":"sp=r&st=2024-02-09T12:33:24Z&se=2025-08-06T20:33:24Z&spr=https&sv=2022-11-02&sr=b&sig=V%2Fq56JjGcL60r0vt3oAPjzx%2FZMu5%2BJo%2BfjKkJF2ccgo%3D","documentKind":"VideoInterval","start":"00:00:16","end":"00:00:26","best":"00:00:21","relevance":0.4005849361419678},{"documentId":"sp=r&st=2024-02-09T12:33:24Z&se=2025-08-06T20:33:24Z&spr=https&sv=2022-11-02&sr=b&sig=V%2Fq56JjGcL60r0vt3oAPjzx%2FZMu5%2BJo%2BfjKkJF2ccgo%3D","documentKind":"VideoInterval","start":"00:00:06","end":"00:00:16","best":"00:00:09","relevance":0.38852864503860474},{"documentId":"sp=r&st=2024-02-09T12:33:24Z&se=2025-08-06T20:33:24Z&spr=https&sv=2022-11-02&sr=b&sig=V%2Fq56JjGcL60r0vt3oAPjzx%2FZMu5%2BJo%2BfjKkJF2ccgo%3D","documentKind":"VideoInterval","start":"00:01:42","end":"00:01:58","best":"00:01:43","relevance":0.38718080520629883},{"documentId":"sp=r&st=2024-02-09T12:33:24Z&se=2025-08-06T20:33:24Z&spr=https&sv=2022-11-02&sr=b&sig=V%2Fq56JjGcL60r0vt3oAPjzx%2FZMu5%2BJo%2BfjKkJF2ccgo%3D","documentKind":"VideoInterval","start":"00:01:58","end":"00:02:14","best":"00:02:03","relevance":0.3811851143836975},{"documentId":"sp=r&st=2024-02-09T12:33:24Z&se=2025-08-06T20:33:24Z&spr=https&sv=2022-11-02&sr=b&sig=V%2Fq56JjGcL60r0vt3oAPjzx%2FZMu5%2BJo%2BfjKkJF2ccgo%3D","documentKind":"VideoInterval","start":"00:00:42","end":"00:00:52","best":"00:00:42","relevance":0.3765566647052765},{"documentId":"sp=r&st=2024-02-09T12:33:24Z&se=2025-08-06T20:33:24Z&spr=https&sv=2022-11-02&sr=b&sig=V%2Fq56JjGcL60r0vt3oAPjzx%2FZMu5%2BJo%2BfjKkJF2ccgo%3D","documentKind":"VideoInterval","start":"00:00:26","end":"00:00:42","best":"00:00:28","relevance":0.3718773126602173},{"documentId":"sp=r&st=2024-02-09T12:33:24Z&se=2025-08-06T20:33:24Z&spr=https&sv=2022-11-02&sr=b&sig=V%2Fq56JjGcL60r0vt3oAPjzx%2FZMu5%2BJo%2BfjKkJF2ccgo%3D","documentKind":"VideoInterval","start":"00:01:08","end":"00:01:24","best":"00:01:10","relevance":0.3707084357738495},{"documentId":"sp=r&st=2024-02-09T12:33:24
Z&se=2025-08-06T20:33:24Z&spr=https&sv=2022-11-02&sr=b&sig=V%2Fq56JjGcL60r0vt3oAPjzx%2FZMu5%2BJo%2BfjKkJF2ccgo%3D","documentKind":"VideoInterval","start":"00:01:37","end":"00:01:42","best":"00:01:38","relevance":0.36235538125038147},{"documentId":"sp=r&st=2024-02-09T12:33:24Z&se=2025-08-06T20:33:24Z&spr=https&sv=2022-11-02&sr=b&sig=V%2Fq56JjGcL60r0vt3oAPjzx%2FZMu5%2BJo%2BfjKkJF2ccgo%3D","documentKind":"VideoInterval","start":"00:01:29","end":"00:01:37","best":"00:01:33","relevance":0.3606133460998535},{"documentId":"sp=r&st=2024-02-09T12:33:24Z&se=2025-08-06T20:33:24Z&spr=https&sv=2022-11-02&sr=b&sig=V%2Fq56JjGcL60r0vt3oAPjzx%2FZMu5%2BJo%2BfjKkJF2ccgo%3D","documentKind":"VideoInterval","start":"00:01:03","end":"00:01:08","best":"00:01:04","relevance":0.3513660728931427},{"documentId":"sp=r&st=2024-02-09T12:33:24Z&se=2025-08-06T20:33:24Z&spr=https&sv=2022-11-02&sr=b&sig=V%2Fq56JjGcL60r0vt3oAPjzx%2FZMu5%2BJo%2BfjKkJF2ccgo%3D","documentKind":"VideoInterval","start":"00:00:00","end":"00:00:06","best":"00:00:05","relevance":0.3378048241138458}]}' # JSON response
121
-
122
-
123
  json_data = load_json_output(responseQueryText)
124
 
125
  # Extract audio from the video
@@ -136,10 +106,6 @@ def predict_video(input_video, input_audio=None, input_choice="Explosions"):
136
  finalAudioPath = "audio/finalAudio.mp3"
137
  save_audio(final_audio, finalAudioPath)
138
 
139
- # Apply audio mastering
140
-
141
- #master = subprocess.run(["node", "masteringModule/main.js", "--input", finalAudioPath, "--output", finalAudioPath])
142
-
143
  if (userAudioInputFlag == True):
144
  AzureBlobStorageVideo.delete_container('useruploadhuggingfaceaudio')
145
 
@@ -153,46 +119,8 @@ def predict_video(input_video, input_audio=None, input_choice="Explosions"):
153
  save_video(final_video, "video/final_enhanced_video.mp4")
154
  finalVideoPath = "video/final_enhanced_video.mp4"
155
 
156
- # 3.2.1. modify query response
157
- #hard-coded query response
158
- # output_query_response = '{"value":[{"documentId":"sp=r&st=2024-02-09T12:33:24Z&se=2025-08-06T20:33:24Z&spr=https&sv=2022-11-02&sr=b&sig=V%2Fq56JjGcL60r0vt3oAPjzx%2FZMu5%2BJo%2BfjKkJF2ccgo%3D","documentKind":"VideoInterval","start":"00:00:16","end":"00:00:26","best":"00:00:21","relevance":0.4005849361419678},{"documentId":"sp=r&st=2024-02-09T12:33:24Z&se=2025-08-06T20:33:24Z&spr=https&sv=2022-11-02&sr=b&sig=V%2Fq56JjGcL60r0vt3oAPjzx%2FZMu5%2BJo%2BfjKkJF2ccgo%3D","documentKind":"VideoInterval","start":"00:00:06","end":"00:00:16","best":"00:00:09","relevance":0.38852864503860474},{"documentId":"sp=r&st=2024-02-09T12:33:24Z&se=2025-08-06T20:33:24Z&spr=https&sv=2022-11-02&sr=b&sig=V%2Fq56JjGcL60r0vt3oAPjzx%2FZMu5%2BJo%2BfjKkJF2ccgo%3D","documentKind":"VideoInterval","start":"00:01:42","end":"00:01:58","best":"00:01:43","relevance":0.38718080520629883},{"documentId":"sp=r&st=2024-02-09T12:33:24Z&se=2025-08-06T20:33:24Z&spr=https&sv=2022-11-02&sr=b&sig=V%2Fq56JjGcL60r0vt3oAPjzx%2FZMu5%2BJo%2BfjKkJF2ccgo%3D","documentKind":"VideoInterval","start":"00:01:58","end":"00:02:14","best":"00:02:03","relevance":0.3811851143836975},{"documentId":"sp=r&st=2024-02-09T12:33:24Z&se=2025-08-06T20:33:24Z&spr=https&sv=2022-11-02&sr=b&sig=V%2Fq56JjGcL60r0vt3oAPjzx%2FZMu5%2BJo%2BfjKkJF2ccgo%3D","documentKind":"VideoInterval","start":"00:00:42","end":"00:00:52","best":"00:00:42","relevance":0.3765566647052765},{"documentId":"sp=r&st=2024-02-09T12:33:24Z&se=2025-08-06T20:33:24Z&spr=https&sv=2022-11-02&sr=b&sig=V%2Fq56JjGcL60r0vt3oAPjzx%2FZMu5%2BJo%2BfjKkJF2ccgo%3D","documentKind":"VideoInterval","start":"00:00:26","end":"00:00:42","best":"00:00:28","relevance":0.3718773126602173},{"documentId":"sp=r&st=2024-02-09T12:33:24Z&se=2025-08-06T20:33:24Z&spr=https&sv=2022-11-02&sr=b&sig=V%2Fq56JjGcL60r0vt3oAPjzx%2FZMu5%2BJo%2BfjKkJF2ccgo%3D","documentKind":"VideoInterval","start":"00:01:08","end":"00:01:24","best":"00:01:10","relevance":0.3707084357738495},{"documentId":"sp=r&st=2024-02-09T12:33:2
4Z&se=2025-08-06T20:33:24Z&spr=https&sv=2022-11-02&sr=b&sig=V%2Fq56JjGcL60r0vt3oAPjzx%2FZMu5%2BJo%2BfjKkJF2ccgo%3D","documentKind":"VideoInterval","start":"00:01:37","end":"00:01:42","best":"00:01:38","relevance":0.36235538125038147},{"documentId":"sp=r&st=2024-02-09T12:33:24Z&se=2025-08-06T20:33:24Z&spr=https&sv=2022-11-02&sr=b&sig=V%2Fq56JjGcL60r0vt3oAPjzx%2FZMu5%2BJo%2BfjKkJF2ccgo%3D","documentKind":"VideoInterval","start":"00:01:29","end":"00:01:37","best":"00:01:33","relevance":0.3606133460998535},{"documentId":"sp=r&st=2024-02-09T12:33:24Z&se=2025-08-06T20:33:24Z&spr=https&sv=2022-11-02&sr=b&sig=V%2Fq56JjGcL60r0vt3oAPjzx%2FZMu5%2BJo%2BfjKkJF2ccgo%3D","documentKind":"VideoInterval","start":"00:01:03","end":"00:01:08","best":"00:01:04","relevance":0.3513660728931427},{"documentId":"sp=r&st=2024-02-09T12:33:24Z&se=2025-08-06T20:33:24Z&spr=https&sv=2022-11-02&sr=b&sig=V%2Fq56JjGcL60r0vt3oAPjzx%2FZMu5%2BJo%2BfjKkJF2ccgo%3D","documentKind":"VideoInterval","start":"00:00:00","end":"00:00:06","best":"00:00:05","relevance":0.3378048241138458}]}' # JSON response
159
- # modifiedQueryResponse = load_json_output(output_query_response)
160
- #
161
- # # 3.2.2. get timestamps of haptics segments
162
- # hapticSegments = get_explosion_segments(modifiedQueryResponse)
163
- #
164
- # # 3.2.3. create final audio
165
- # finalAudio = create_final_audio(extractedAudioPath, hapticSegments)
166
- # finalAudioPath = "audio/finalAudio.mp3"
167
- # save_audio(finalAudio, finalAudioPath)
168
- #
169
- # # 3.3. Master final audio file
170
- # #master = subprocess.run(["node", "masteringModule/main.js", "--input", finalAudioPath, "--output", finalAudioPath])
171
- #
172
- # # 3.4. Prepare video file
173
- # #muteVideo = without_audio(input_video)
174
- # # currentVideoPath = "video/currentVideo.mp4"
175
- # # save_video(input_video, currentVideoPath)
176
- # # muteVideo = VideoFileClip(currentVideoPath)
177
- # # muteVideo = muteVideo.without_audio()
178
- # # mutevideoPath = "video/muteVideo.mp4"
179
- # # save_video(muteVideo, mutevideoPath)
180
- #
181
- # # 3.5. Combine audio with video
182
- # inputVideoPath = input_video
183
- # currentVideoPath = "video/currentVideo.mp4"
184
- # currentVideo = VideoFileClip(currentVideoPath)
185
- #
186
- # finalVideo = combine_video_audio(currentVideo,finalAudio)
187
- # finalVideoPath = "video/finalEnhancedVideo.mp4"
188
- # save_video(finalVideo, finalVideoPath)
189
-
190
  return [finalVideoPath, f"Video enhancement successful"]
191
 
192
- # You can optionally add a progress bar or loading indicator here
193
-
194
-
195
-
196
  css = """
197
  #col-container {
198
  margin: 0 auto;
@@ -261,6 +189,4 @@ with gr.Blocks(css=css) as demo:
261
  outputs=[video_out, text_out],
262
  queue=False
263
  )
264
-
265
-
266
- demo.launch(debug=True)
 
57
  userAudioInputFlag = False
58
 
59
  if input_audio is not None:
 
 
60
  userAudioInputFlag = True
 
61
  else:
62
  if (input_choice == "Explosions"):
63
  input_audio = os.path.join(os.path.dirname(__file__), "audio/1_seconds_haptic_audio.mp3")
 
72
  input_audio = os.path.join(os.path.dirname(__file__), "audio/5_seconds_haptic_videos.mp3")
73
  print("default selected")
74
 
 
 
 
 
 
 
 
 
 
 
75
  """
76
  Processes the uploaded video (replace with your video analysis logic).
77
 
 
82
  Returns:
83
  A list containing the processed video and a message string.
84
  """
 
85
  responseQueryText = videoAnalysis(videoSASURL, videoSASToken, input_choice)
86
 
87
  # IF method returns error: run analysis again
 
90
 
91
  AzureBlobStorageVideo.delete_container('useruploadhuggingfacevideo')
92
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
93
  json_data = load_json_output(responseQueryText)
94
 
95
  # Extract audio from the video
 
106
  finalAudioPath = "audio/finalAudio.mp3"
107
  save_audio(final_audio, finalAudioPath)
108
 
 
 
 
 
109
  if (userAudioInputFlag == True):
110
  AzureBlobStorageVideo.delete_container('useruploadhuggingfaceaudio')
111
 
 
119
  save_video(final_video, "video/final_enhanced_video.mp4")
120
  finalVideoPath = "video/final_enhanced_video.mp4"
121
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
122
  return [finalVideoPath, f"Video enhancement successful"]
123
 
 
 
 
 
124
  css = """
125
  #col-container {
126
  margin: 0 auto;
 
189
  outputs=[video_out, text_out],
190
  queue=False
191
  )
192
+ demo.launch(debug=True)