Samuelblue committed
Commit a10b5f3 · verified · 1 Parent(s): 43da6b0

Update Main Gradio code

Files changed (1)
  1. app.py +332 -213
app.py CHANGED
@@ -7,307 +7,426 @@ from pathlib import Path
 import time
 import aiohttp
 import asyncio

-
 # Set true if you're using huggingface inference API API https://huggingface.co/inference-api
 API_BACKEND = True
-# MODEL = 'facebook/wav2vec2-large-960h-lv60-self'
-# MODEL = "facebook/wav2vec2-large-960h"
 MODEL = "facebook/wav2vec2-base-960h"
-# MODEL = "patrickvonplaten/wav2vec2-large-960h-lv60-self-4-gram"
 if API_BACKEND:
-    from dotenv import load_dotenv
-    import base64
-    import asyncio
     load_dotenv(Path(".env"))
-
-    HF_TOKEN = os.environ["HF_TOKEN"]
     headers = {"Authorization": f"Bearer {HF_TOKEN}"}
-    API_URL = f'https://api-inference.huggingface.co/models/{MODEL}'
-
 else:
     import torch
     from transformers import pipeline

     # is cuda available?
-    cuda = torch.device(
-        'cuda:0') if torch.cuda.is_available() else torch.device('cpu')
     device = 0 if torch.cuda.is_available() else -1
-    speech_recognizer = pipeline(
-        task="automatic-speech-recognition",
-        model=f'{MODEL}',
-        tokenizer=f'{MODEL}',
-        framework="pt",
-        device=device,
-    )

 videos_out_path = Path("./videos_out")
 videos_out_path.mkdir(parents=True, exist_ok=True)

-samples_data = sorted(Path('examples').glob('*.json'))
 SAMPLES = []
-for file in samples_data:
-    with open(file) as f:
-        sample = json.load(f)
-    SAMPLES.append(sample)
-VIDEOS = list(map(lambda x: [x['video']], SAMPLES))

-total_inferences_since_reboot = 415
-total_cuts_since_reboot = 1539


-async def speech_to_text(video_file_path):
     """
-    Takes a video path to convert to audio, transcribe audio channel to text and char timestamps

-    Using https://huggingface.co/tasks/automatic-speech-recognition pipeline
     """
-    global total_inferences_since_reboot
-    if (video_file_path == None):
-        raise ValueError("Error no video input")

     video_path = Path(video_file_path)
     try:
         # convert video to audio 16k using PIPE to audio_memory
-        audio_memory, _ = ffmpeg.input(video_path).output(
-            '-', format="wav", ac=1, ar='16k').overwrite_output().global_args('-loglevel', 'quiet').run(capture_stdout=True)
     except Exception as e:
-        raise RuntimeError("Error converting video to audio")

-    ping("speech_to_text")
-    last_time = time.time()
     if API_BACKEND:
-        # Using Inference API https://huggingface.co/inference-api
-        # try twice, because the model must be loaded
-        for i in range(10):
-            for tries in range(4):
-                print(f'Transcribing from API attempt {tries}')
-                try:
-                    inference_reponse = await query_api(audio_memory)
-                    print(inference_reponse)
-                    transcription = inference_reponse["text"].lower()
-                    timestamps = [[chunk["text"].lower(), chunk["timestamp"][0], chunk["timestamp"][1]]
-                                  for chunk in inference_reponse['chunks']]
-
-                    total_inferences_since_reboot += 1
-                    print("\n\ntotal_inferences_since_reboot: ",
-                          total_inferences_since_reboot, "\n\n")
-                    return (transcription, transcription, timestamps)
-                except Exception as e:
-                    print(e)
-                    if 'error' in inference_reponse and 'estimated_time' in inference_reponse:
-                        wait_time = inference_reponse['estimated_time']
-                        print("Waiting for model to load....", wait_time)
-                        # wait for loading model
-                        # 5 seconds plus for certanty
-                        await asyncio.sleep(wait_time + 5.0)
-                    elif 'error' in inference_reponse:
-                        raise RuntimeError("Error Fetching API",
-                                           inference_reponse['error'])
-                    else:
-                        break
-            else:
-                raise RuntimeError(inference_reponse, "Error Fetching API")
-    else:

         try:
-            print(f'Transcribing via local model')
-            output = speech_recognizer(
-                audio_memory, return_timestamps="char", chunk_length_s=10, stride_length_s=(4, 2))

             transcription = output["text"].lower()
-            timestamps = [[chunk["text"].lower(), chunk["timestamp"][0].tolist(), chunk["timestamp"][1].tolist()]
-                          for chunk in output['chunks']]
-            total_inferences_since_reboot += 1

-            print("\n\ntotal_inferences_since_reboot: ",
-                  total_inferences_since_reboot, "\n\n")
             return (transcription, transcription, timestamps)
         except Exception as e:
-            raise RuntimeError("Error Running inference with local model", e)


 async def cut_timestamps_to_video(video_in, transcription, text_in, timestamps):
     """
     Given original video input, text transcript + timestamps,
-    and edit ext cuts video segments into a single video
     """
-    global total_cuts_since_reboot

-    video_path = Path(video_in)
-    video_file_name = video_path.stem
-    if (video_in == None or text_in == None or transcription == None):
-        raise ValueError("Inputs undefined")

     d = Differ()
     # compare original transcription with edit text
-    diff_chars = d.compare(transcription, text_in)
-    # remove all text aditions from diff
-    filtered = list(filter(lambda x: x[0] != '+', diff_chars))
-
-    # filter timestamps to be removed
-    # timestamps_to_cut = [b for (a,b) in zip(filtered, timestamps_var) if a[0]== '-' ]
-    # return diff tokes and cutted video!!
-
-    # groupping character timestamps so there are less cuts
-    idx = 0
-    grouped = {}
-    for (a, b) in zip(filtered, timestamps):
-        if a[0] != '-':
-            if idx in grouped:
-                grouped[idx].append(b)
             else:
-                grouped[idx] = []
-                grouped[idx].append(b)
-        else:
-            idx += 1
-
-    # after grouping, gets the lower and upter start and time for each group
-    timestamps_to_cut = [[v[0][1], v[-1][2]] for v in grouped.values()]
-
-    between_str = '+'.join(
-        map(lambda t: f'between(t,{t[0]},{t[1]})', timestamps_to_cut))
-
-    if timestamps_to_cut:
-        video_file = ffmpeg.input(video_in)
-        video = video_file.video.filter(
-            "select", f'({between_str})').filter("setpts", "N/FRAME_RATE/TB")
-        audio = video_file.audio.filter(
-            "aselect", f'({between_str})').filter("asetpts", "N/SR/TB")
-
-        output_video = f'./videos_out/{video_file_name}.mp4'
-        ffmpeg.concat(video, audio, v=1, a=1).output(
-            output_video).overwrite_output().global_args('-loglevel', 'quiet').run()
-    else:
-        output_video = video_in

-    tokens = [(token[2:], token[0] if token[0] != " " else None)
-              for token in filtered]

-    total_cuts_since_reboot += 1
-    ping("video_cuts")
-    print("\n\ntotal_cuts_since_reboot: ", total_cuts_since_reboot, "\n\n")
-    return (tokens, output_video)
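
For reference, the removed implementation above does the actual trimming with ffmpeg's select/aselect filters, keeping frames whose timestamp falls inside any of the kept intervals and then rebasing timestamps with setpts/asetpts. A minimal standalone sketch of that pattern (the input path and intervals are made up for illustration):

    import ffmpeg

    # Hypothetical intervals to keep, in seconds.
    intervals = [(1.0, 2.5), (4.0, 6.0)]
    expr = "+".join(f"between(t,{start},{end})" for start, end in intervals)

    inp = ffmpeg.input("input.mp4")
    video = inp.video.filter("select", f"({expr})").filter("setpts", "N/FRAME_RATE/TB")
    audio = inp.audio.filter("aselect", f"({expr})").filter("asetpts", "N/SR/TB")
    ffmpeg.concat(video, audio, v=1, a=1).output("kept_only.mp4").overwrite_output().run()

The '+' between the between() terms acts as a logical OR, so a frame survives if it falls inside any kept interval.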


-async def query_api(audio_bytes: bytes):
-    """
-    Query for Huggingface Inference API for Automatic Speech Recognition task
-    """
-    payload = json.dumps({
-        "inputs": base64.b64encode(audio_bytes).decode("utf-8"),
-        "parameters": {
-            "return_timestamps": "char",
-            "chunk_length_s": 10,
-            "stride_length_s": [4, 2]
-        },
-        "options": {"use_gpu": False}
-    }).encode("utf-8")
-    async with aiohttp.ClientSession() as session:
-        async with session.post(API_URL, headers=headers, data=payload) as response:
-            print("API Response: ", response.status)
-            if response.headers['Content-Type'] == 'application/json':
-                return await response.json()
-            elif response.headers['Content-Type'] == 'application/octet-stream':
-                return await response.read()
-            elif response.headers['Content-Type'] == 'text/plain':
-                return await response.text()
-            else:
-                raise RuntimeError("Error Fetching API")


-def ping(name):
-    url = f'https://huggingface.co/api/telemetry/spaces/radames/edit-video-by-editing-text/{name}'
-    print("ping: ", url)

-    async def req():
-        async with aiohttp.ClientSession() as session:
-            async with session.get(url) as response:
-                print("pong: ", response.status)
-    asyncio.create_task(req())


-# ---- Gradio Layout -----
-video_in = gr.Video(label="Video file", elem_id="video-container")
-text_in = gr.Textbox(label="Transcription", lines=10, interactive=True)
-video_out = gr.Video(label="Video Out")
-diff_out = gr.HighlightedText(label="Cuts Diffs", combine_adjacent=True)
-examples = gr.Dataset(components=[video_in], samples=VIDEOS, type="index")

 css = """
 #cut_btn, #reset_btn { align-self:stretch; }
-#\\31 3 { max-width: 540px; }
 .output-markdown {max-width: 65ch !important;}
 #video-container{
     max-width: 40rem;
 }
 """
 with gr.Blocks(css=css) as demo:
-    transcription_var = gr.State()
-    timestamps_var = gr.State()
-    with gr.Row():
-        with gr.Column():
-            gr.Markdown("""
-            # Edit Video By Editing Text
-            This project is a quick proof of concept of a simple video editor where the edits
-            are made by editing the audio transcription.
-            Using the [Huggingface Automatic Speech Recognition Pipeline](https://huggingface.co/tasks/automatic-speech-recognition)
-            with a fine tuned [Wav2Vec2 model using Connectionist Temporal Classification (CTC)](https://huggingface.co/facebook/wav2vec2-large-960h-lv60-self)
-            you can predict not only the text transcription but also the [character or word base timestamps](https://huggingface.co/docs/transformers/v4.19.2/en/main_classes/pipelines#transformers.AutomaticSpeechRecognitionPipeline.__call__.return_timestamps)
-            """)

     with gr.Row():
-
-        examples.render()
-
-        def load_example(id):
-            video = SAMPLES[id]['video']
-            transcription = SAMPLES[id]['transcription'].lower()
-            timestamps = SAMPLES[id]['timestamps']
-
-            return (video, transcription, transcription, timestamps)
-
         examples.click(
             load_example,
             inputs=[examples],
             outputs=[video_in, text_in, transcription_var, timestamps_var],
-            queue=False)

     with gr.Row():
         with gr.Column():
             video_in.render()
             transcribe_btn = gr.Button("Transcribe Audio")
-            transcribe_btn.click(speech_to_text, [video_in], [
-                text_in, transcription_var, timestamps_var])

-    with gr.Row():
-        gr.Markdown("""
-        ### Now edit as text
-        After running the video transcription, you can make cuts to the text below (only cuts, not additions!)""")

     with gr.Row():
         with gr.Column():
             text_in.render()
             with gr.Row():
                 cut_btn = gr.Button("Cut to video", elem_id="cut_btn")
-                # send audio path and hidden variables
-                cut_btn.click(cut_timestamps_to_video, [
-                    video_in, transcription_var, text_in, timestamps_var], [diff_out, video_out])

                 reset_transcription = gr.Button(
-                    "Reset to last trascription", elem_id="reset_btn")
                 reset_transcription.click(
-                    lambda x: x, transcription_var, text_in)
         with gr.Column():
             video_out.render()
             diff_out.render()
-    with gr.Row():
-        gr.Markdown("""
-        #### Video Credits
-
-        1. [Cooking](https://vimeo.com/573792389)
-        1. [Shia LaBeouf "Just Do It"](https://www.youtube.com/watch?v=n2lTxIk_Dr0)
-        1. [Mark Zuckerberg & Yuval Noah Harari in Conversation](https://www.youtube.com/watch?v=Boj9eD0Wug8)
-        """)
-demo.queue()
 if __name__ == "__main__":
-    demo.launch(debug=True)
 import time
 import aiohttp
 import asyncio
+import base64
+from dotenv import load_dotenv

+# --- Configuration ---
 # Set true if you're using huggingface inference API API https://huggingface.co/inference-api
 API_BACKEND = True
 MODEL = "facebook/wav2vec2-base-960h"
+# MODEL = "facebook/wav2vec2-large-960h"
+# MODEL = "facebook/wav2vec2-large-960h-lv60-self"
+# MODEL = "patrickvonplaten/wav2vec2-large-960h-lv60-self-4-gram"  # Example of different model
+API_URL = f'https://api-inference.huggingface.co/models/{MODEL}'
+RETRY_ATTEMPTS = 5  # Increased retry attempts for API calls
+RETRY_DELAY = 5  # Base delay in seconds before retrying API calls
+
+# --- Initialization ---
 if API_BACKEND:
     load_dotenv(Path(".env"))
+    HF_TOKEN = os.environ.get("HF_TOKEN")
+    if not HF_TOKEN:
+        raise ValueError("HF_TOKEN environment variable not set.")
     headers = {"Authorization": f"Bearer {HF_TOKEN}"}
 else:
     import torch
     from transformers import pipeline

     # is cuda available?
     device = 0 if torch.cuda.is_available() else -1
+    try:
+        speech_recognizer = pipeline(
+            task="automatic-speech-recognition",
+            model=MODEL,
+            tokenizer=MODEL,
+            framework="pt",
+            device=device,
+        )
+    except Exception as e:
+        raise RuntimeError(f"Error initializing local model {MODEL}: {e}")

 videos_out_path = Path("./videos_out")
 videos_out_path.mkdir(parents=True, exist_ok=True)

+# Load samples data
+samples_data_files = sorted(Path('examples').glob('*.json'))
 SAMPLES = []
+for file in samples_data_files:
+    try:
+        with open(file, 'r') as f:
+            sample = json.load(f)
+            SAMPLES.append(sample)
+    except (json.JSONDecodeError, FileNotFoundError) as e:
+        print(f"Error loading sample file {file}: {e}")
+
+VIDEOS = [[sample['video']] for sample in SAMPLES if 'video' in sample]
+
+# --- Helper Functions ---
+async def query_api(audio_bytes: bytes):
+    """
+    Query the Hugging Face Inference API for Automatic Speech Recognition.
+    Includes retry logic with exponential backoff.
+    """
+    payload = json.dumps({
+        "inputs": base64.b64encode(audio_bytes).decode("utf-8"),
+        "parameters": {
+            "return_timestamps": "char",
+            "chunk_length_s": 10,
+            "stride_length_s": [4, 2]
+        },
+        "options": {"use_gpu": False}  # Set to True if you have a GPU and want to use it
+    }).encode("utf-8")

+    async with aiohttp.ClientSession() as session:
+        for attempt in range(RETRY_ATTEMPTS):
+            print(f'Transcribing from API attempt {attempt + 1}/{RETRY_ATTEMPTS}')
+            try:
+                async with session.post(API_URL, headers=headers, data=payload) as response:
+                    print("API Response Status:", response.status)
+                    content_type = response.headers.get('Content-Type', '')
+
+                    if response.status == 200 and 'application/json' in content_type:
+                        return await response.json()
+                    elif response.status != 200 and 'application/json' in content_type:
+                        error_response = await response.json()
+                        if 'error' in error_response and 'estimated_time' in error_response:
+                            wait_time = error_response['estimated_time']
+                            print(f"Model loading, waiting for {wait_time} seconds...")
+                            await asyncio.sleep(wait_time + RETRY_DELAY)  # Wait time + buffer
+                        elif 'error' in error_response:
+                            raise RuntimeError(f"API Error: {error_response['error']}")
+                        else:
+                            raise RuntimeError(f"Unknown API Error: {error_response}")
+                    else:
+                        response_text = await response.text()
+                        raise RuntimeError(f"Unexpected API response format (Status: {response.status}, Content-Type: {content_type}): {response_text}")

+            except aiohttp.ClientError as e:
+                print(f"AIOHTTP Client Error during API call: {e}")
+            except RuntimeError as e:
+                print(f"Runtime error during API call: {e}")

+            if attempt < RETRY_ATTEMPTS - 1:
+                wait_time = RETRY_DELAY * (2 ** attempt)  # Exponential backoff
+                print(f"Retrying in {wait_time} seconds...")
+                await asyncio.sleep(wait_time)
+
+    raise RuntimeError(f"Failed to get transcription after {RETRY_ATTEMPTS} attempts.")
+
+
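Under the hood, speech_to_text() below simply awaits this helper with the raw WAV bytes produced by ffmpeg. A quick way to exercise it on its own, assuming HF_TOKEN is configured and the definitions above are importable; "sample.wav" is only a placeholder for any short 16 kHz mono WAV clip:

    # Hypothetical smoke test for query_api(); not part of the app itself.
    async def _demo_query():
        audio_bytes = Path("sample.wav").read_bytes()
        result = await query_api(audio_bytes)
        print(result["text"])
        print(result["chunks"][:3])  # char-level chunks with [start, end] times

    asyncio.run(_demo_query())  # run from a plain script, outside the Gradio event loop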
+def ping_telemetry(name: str):
     """
+    Send a telemetry ping to Hugging Face Spaces.
+    This is fire-and-forget and doesn't affect the main process flow.
+    """
+    url = f'https://huggingface.co/api/telemetry/spaces/radames/edit-video-by-editing-text/{name}'
+    print(f"Pinging telemetry: {url}")

+    async def send_ping():
+        try:
+            async with aiohttp.ClientSession() as session:
+                async with session.get(url) as response:
+                    print(f"Telemetry pong: {response.status}")
+        except aiohttp.ClientError as e:
+            print(f"Failed to send telemetry ping: {e}")
+    # Using asyncio.run_coroutine_threadsafe might be safer in a threaded Gradio environment,
+    # but requires managing an event loop in a separate thread.
+    # For simplicity here, we'll use create_task assuming an event loop is running (Gradio handles this).
+    asyncio.create_task(send_ping())
+
+
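The comment above flags asyncio.run_coroutine_threadsafe as the more defensive option when the calling thread is not guaranteed to have a running event loop. A minimal sketch of that variant with a dedicated background loop (the names here are illustrative, not part of the commit):

    import threading

    _telemetry_loop = asyncio.new_event_loop()
    threading.Thread(target=_telemetry_loop.run_forever, daemon=True).start()

    def ping_telemetry_threadsafe(name: str):
        url = f'https://huggingface.co/api/telemetry/spaces/radames/edit-video-by-editing-text/{name}'

        async def send_ping():
            async with aiohttp.ClientSession() as session:
                async with session.get(url) as response:
                    print(f"Telemetry pong: {response.status}")

        # Safe from any thread, whether or not a loop is running in the caller's thread.
        asyncio.run_coroutine_threadsafe(send_ping(), _telemetry_loop)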
+# --- Main Gradio Functions ---
+async def speech_to_text(video_file_path):
+    """
+    Takes a video path to convert to audio, transcribe audio channel to text and char timestamps.
     """
+    if video_file_path is None:
+        raise gr.Error("Error: No video input provided.")

     video_path = Path(video_file_path)
+    if not video_path.exists():
+        raise gr.Error(f"Error: Video file not found at {video_file_path}")
+
     try:
         # convert video to audio 16k using PIPE to audio_memory
+        # Use asyncio-compatible way or run in a separate thread if ffmpeg-python is blocking
+        loop = asyncio.get_running_loop()
+        audio_memory, _ = await loop.run_in_executor(
+            None, lambda: ffmpeg.input(video_path).output(
+                '-', format="wav", ac=1, ar='16k').overwrite_output().global_args('-loglevel', 'quiet').run(capture_stdout=True)
+        )
+
+    except ffmpeg.Error as e:
+        raise gr.Error(f"Error converting video to audio: {e.stderr.decode()}")
     except Exception as e:
+        raise gr.Error(f"An unexpected error occurred during audio conversion: {e}")
+
+
+    ping_telemetry("speech_to_text")

     if API_BACKEND:
+        try:
+            inference_response = await query_api(audio_memory)
+            print("Inference Response:", inference_response)
+            if not isinstance(inference_response, dict) or 'text' not in inference_response or 'chunks' not in inference_response:
+                raise RuntimeError(f"Unexpected API response structure: {inference_response}")
+
+            transcription = inference_response["text"].lower()
+            # Ensure timestamps have the correct structure and handle potential None values
+            timestamps = [[chunk.get("text", "").lower(), chunk.get("timestamp", [None, None])[0], chunk.get("timestamp", [None, None])[1]]
+                          for chunk in inference_response.get('chunks', []) if isinstance(chunk, dict)]

+            # Filter out timestamps with None values if necessary, or handle them downstream
+            timestamps = [ts for ts in timestamps if ts[1] is not None and ts[2] is not None]
+
+
+            return (transcription, transcription, timestamps)
+
+        except Exception as e:
+            raise gr.Error(f"Error fetching transcription from API: {e}")
+
+    else:
         try:
+            print(f'Transcribing via local model {MODEL}')
+            # Run blocking model inference in an executor
+            loop = asyncio.get_running_loop()
+            output = await loop.run_in_executor(
+                None, lambda: speech_recognizer(
+                    audio_memory, return_timestamps="char", chunk_length_s=10, stride_length_s=(4, 2))
+            )
+
+            if not isinstance(output, dict) or 'text' not in output or 'chunks' not in output:
+                raise RuntimeError(f"Unexpected model output structure: {output}")

             transcription = output["text"].lower()
+            # Ensure timestamps have the correct structure and handle potential None/list values
+            timestamps = [[chunk.get("text", "").lower(),
+                           chunk.get("timestamp", [None, None])[0] if not isinstance(chunk.get("timestamp", [None, None])[0], list) else chunk.get("timestamp", [None, None])[0][0],
+                           chunk.get("timestamp", [None, None])[1] if not isinstance(chunk.get("timestamp", [None, None])[1], list) else chunk.get("timestamp", [None, None])[1][0]
+                           ]
+                          for chunk in output.get('chunks', []) if isinstance(chunk, dict)]
+
+            # Filter out timestamps with None values if necessary, or handle them downstream
+            timestamps = [ts for ts in timestamps if ts[1] is not None and ts[2] is not None]
+

             return (transcription, transcription, timestamps)
+
         except Exception as e:
+            raise gr.Error(f"Error running inference with local model: {e}")
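Both branches above assume the same chunk layout from return_timestamps="char": each chunk carries a text field plus a [start, end] pair in seconds, which the list comprehensions unpack into [text, start, end] triples. An illustrative (made up, not actual model output) example of that shape:

    # Assumed shape of the ASR output that the timestamp parsing above expects.
    example_output = {
        "text": "JUST DO IT",
        "chunks": [
            {"text": "J", "timestamp": (0.52, 0.54)},
            {"text": "U", "timestamp": (0.56, 0.60)},
            {"text": "S", "timestamp": (0.62, 0.66)},
            # ... one entry per emitted character; the code lowercases the text afterwards
        ],
    }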


 async def cut_timestamps_to_video(video_in, transcription, text_in, timestamps):
     """
     Given original video input, text transcript + timestamps,
+    and edited text cuts video segments into a single video
     """
+    if video_in is None or text_in is None or transcription is None or timestamps is None:
+        raise gr.Error("Inputs undefined. Please provide video, transcription, and edited text.")
+
+    if not Path(video_in).exists():
+        raise gr.Error(f"Error: Video file not found at {video_in}")


     d = Differ()
     # compare original transcription with edit text
+    diff_chars = list(d.compare(transcription, text_in))
+
+    # Map filtered characters back to original timestamps
+    # This requires careful indexing or alignment
+    # A more robust approach might involve aligning the diff output with the original timestamps
+    # based on character positions. For simplicity here, we'll assume a direct mapping after filtering
+    # which might not be accurate if additions/deletions significantly alter the text structure.
+    # A better approach would be to process the diff and the original timestamps in parallel.
+
+    # Let's refine the logic to align diff with timestamps more accurately.
+    # We'll iterate through the diff and the timestamps simultaneously.
+    filtered_timestamps = []
+    timestamp_idx = 0
+    for diff_line in diff_chars:
+        # Lines starting with '-' are deletions, '+' are additions, '?' are changes (we ignore), ' ' are unchanged.
+        if diff_line.startswith('-') or diff_line.startswith(' '):
+            # If it's a deletion or unchanged, it corresponds to an original timestamp
+            if timestamp_idx < len(timestamps):
+                filtered_timestamps.append((diff_line, timestamps[timestamp_idx]))
+                timestamp_idx += 1
+        # Additions ('+') do not correspond to original timestamps, so we skip incrementing timestamp_idx
+
+    # filter timestamps to be removed (those marked with '-')
+    timestamps_to_keep = [ts_info for diff_line, ts_info in filtered_timestamps if not diff_line.startswith('-')]
+
+
+    # groupping character timestamps to keep into continuous segments
+    grouped_segments = []
+    if timestamps_to_keep:
+        current_segment = [timestamps_to_keep[0]]
+        for i in range(1, len(timestamps_to_keep)):
+            # Check if the current timestamp's start time is close to the previous timestamp's end time
+            # This threshold might need adjustment based on the granularity of timestamps
+            if timestamps_to_keep[i][1] - current_segment[-1][2] < 0.1:  # 0.1 seconds threshold
+                current_segment.append(timestamps_to_keep[i])
             else:
+                grouped_segments.append(current_segment)
+                current_segment = [timestamps_to_keep[i]]
+        grouped_segments.append(current_segment)  # Add the last segment

+    # after grouping, gets the lower start and upper end time for each group
+    cut_intervals = [[segment[0][1], segment[-1][2]] for segment in grouped_segments]


+    video_path = Path(video_in)
+    video_file_name = video_path.stem
+    output_video_path = videos_out_path / f"{video_file_name}_cut.mp4"  # Use _cut suffix to avoid overwriting original

+    if cut_intervals:
+        input_video_stream = ffmpeg.input(video_in)

+        # Create select filters for video and audio based on cut intervals
+        video_filters = []
+        audio_filters = []
+        for i, interval in enumerate(cut_intervals):
+            video_filters.append(f'select=\'between(t,{interval[0]},{interval[1]})\'')
+            audio_filters.append(f'aselect=\'between(t,{interval[0]},{interval[1]})\'')

+        # Join filters with commas and add setpts
+        video_filter_str = ','.join(video_filters) + ',setpts=N/FRAME_RATE/TB'
+        audio_filter_str = ','.join(audio_filters) + ',asetpts=N/SR/TB'

+        video_stream = input_video_stream.video.filter_complex(video_filter_str)
+        audio_stream = input_video_stream.audio.filter_complex(audio_filter_str)

+        try:
+            # Use asyncio-compatible way or run in a separate thread
+            loop = asyncio.get_running_loop()
+            await loop.run_in_executor(
+                None, lambda: ffmpeg.concat(video_stream, audio_stream, v=1, a=1).output(
+                    str(output_video_path), preset='fast', crf=23  # Use reasonable encoding settings
+                ).overwrite_output().global_args('-loglevel', 'quiet').run()
+            )
+
+        except ffmpeg.Error as e:
+            raise gr.Error(f"Error cutting video: {e.stderr.decode()}")
+        except Exception as e:
+            raise gr.Error(f"An unexpected error occurred during video cutting: {e}")

+    else:
+        # If no intervals to keep, output an empty video or handle as an error
+        # For now, let's return the original video path and indicate no cuts were made.
+        # Depending on requirements, creating an empty video might be better.
+        output_video_path = Path(video_in)  # No cuts, so output is the original video
+        print("No text was kept, returning original video.")
+
+
+    # Generate diff output for display
+    # The diff_chars list already contains the diff with markers ('-', '+', ' ')
+    # We can directly use this for the highlighted text output
+    diff_output_tokens = [(token[2:], token[0] if token[0] != ' ' else None)
+                          for token in diff_chars]
+
+    ping_telemetry("video_cuts")
+
+    return (diff_output_tokens, str(output_video_path))
+
+
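The alignment above leans on difflib.Differ emitting exactly one marked entry per character of the original transcription (plus '+' entries for anything typed in). A small worked example of which entries keep their timestamps and which get cut (strings chosen purely for illustration):

    from difflib import Differ

    original = "do it"
    edited = "dot"  # the user only deleted characters

    diff = list(Differ().compare(original, edited))
    # diff == ['  d', '  o', '-  ', '- i', '  t']   (two-space prefix = kept, '-' = cut)

    kept = [e[2:] for e in diff if e.startswith(' ')]   # characters whose timestamps survive
    cut = [e[2:] for e in diff if e.startswith('-')]    # characters whose segments are dropped
    print(kept)  # ['d', 'o', 't']
    print(cut)   # [' ', 'i']

Entries marked '+' (text the user typed) never consume a timestamp, which is why the loop above only advances timestamp_idx for ' ' and '-' entries.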
+def load_example(id):
+    """Loads example video and transcription."""
+    if 0 <= id < len(SAMPLES):
+        sample = SAMPLES[id]
+        video = sample.get('video')
+        transcription = sample.get('transcription', '').lower()
+        timestamps = sample.get('timestamps', [])
+        if video is None:
+            raise gr.Error(f"Example at index {id} is missing video path.")
+        return (video, transcription, transcription, timestamps)
+    else:
+        raise gr.Error(f"Invalid example index: {id}")

+
+# --- Gradio Layout ---
 css = """
 #cut_btn, #reset_btn { align-self:stretch; }
+#\\31 3 { max-width: 540px; }  /* Consider making this more general or dynamic */
 .output-markdown {max-width: 65ch !important;}
 #video-container{
     max-width: 40rem;
 }
 """
 with gr.Blocks(css=css) as demo:
+    # Using States to hold transcription and timestamps across interactions
+    transcription_var = gr.State(value="")
+    timestamps_var = gr.State(value=[])
+    video_in = gr.Video(label="Video file", elem_id="video-container")
+    text_in = gr.Textbox(label="Transcription", lines=10, interactive=True)
+    video_out = gr.Video(label="Video Out", interactive=False)  # Output video should not be edited directly
+    diff_out = gr.HighlightedText(label="Cuts Diffs", combine_adjacent=True, show_legend=True)  # Added legend
+
+    gr.Markdown("""
+    # Edit Video By Editing Text
+    This project is a quick proof of concept of a simple video editor where the edits
+    are made by editing the audio transcription.
+    Using the [Huggingface Automatic Speech Recognition Pipeline](https://huggingface.co/tasks/automatic-speech-recognition)
+    with a fine tuned [Wav2Vec2 model using Connectionist Temporal Classification (CTC)](https://huggingface.co/facebook/wav2vec2-large-960h-lv60-self)
+    you can predict not only the text transcription but also the [character or word base timestamps](https://huggingface.co/docs/transformers/v4.19.2/en/main_classes/pipelines#transformers.AutomaticSpeechRecognitionPipeline.__call__.return_timestamps)
+    """)

     with gr.Row():
+        # Examples section
+        examples = gr.Dataset(components=[video_in], samples=VIDEOS, type="index", label="Examples")
         examples.click(
             load_example,
             inputs=[examples],
             outputs=[video_in, text_in, transcription_var, timestamps_var],
+            queue=False  # Set to False if you want immediate loading without waiting in queue
+        )
+
     with gr.Row():
         with gr.Column():
             video_in.render()
             transcribe_btn = gr.Button("Transcribe Audio")
+            transcribe_btn.click(
+                speech_to_text,
+                inputs=[video_in],
+                outputs=[text_in, transcription_var, timestamps_var]
+                # No queue=False here as transcription can take time
+            )

+    gr.Markdown("""
+    ### Now edit as text
+    After running the video transcription, you can make cuts to the text below (only cuts, not additions!)""")

     with gr.Row():
         with gr.Column():
             text_in.render()
             with gr.Row():
                 cut_btn = gr.Button("Cut to video", elem_id="cut_btn")
+                cut_btn.click(
+                    cut_timestamps_to_video,
+                    inputs=[video_in, transcription_var, text_in, timestamps_var],
+                    outputs=[diff_out, video_out]
+                    # No queue=False here as video cutting can take time
+                )

                 reset_transcription = gr.Button(
+                    "Reset to last transcription", elem_id="reset_btn")
                 reset_transcription.click(
+                    lambda x: x,  # Simple lambda to return the input state
+                    inputs=[transcription_var],
+                    outputs=[text_in],
+                    queue=False  # Immediate reset
+                )
         with gr.Column():
             video_out.render()
             diff_out.render()
+
+    gr.Markdown("""
+    #### Video Credits
+    1. [Cooking](https://vimeo.com/573792389)
+    2. [Shia LaBeouf "Just Do It"](https://www.youtube.com/watch?v=n2lTxIk_Dr0)
+    3. [Mark Zuckerberg & Yuval Noah Harari in Conversation](https://www.youtube.com/watch?v=Boj9eD0Wug8)
+    """)
+
+demo.queue()  # Enable queuing for handling multiple users
 if __name__ == "__main__":
+    # debug=True is useful during development
+    # share=True to create a public link (use cautiously)
+    demo.launch(debug=True)
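
As a footnote to the project description embedded in the layout above: the character-level timestamps it mentions come straight out of the transformers ASR pipeline when return_timestamps="char" is passed. A minimal sketch of that call outside the app (the model name matches the default above; "clip.wav" is only a placeholder for a short audio file):

    from transformers import pipeline

    asr = pipeline("automatic-speech-recognition", model="facebook/wav2vec2-base-960h")
    result = asr("clip.wav", return_timestamps="char", chunk_length_s=10, stride_length_s=(4, 2))

    print(result["text"])
    for chunk in result["chunks"][:5]:
        print(chunk["text"], chunk["timestamp"])  # per-character (start, end) times in seconds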