import csv
import gc
import os
import tempfile
from pathlib import Path

import gradio as gr
import gradio.themes as gr_themes
import numpy as np
import spaces
import torch
from nemo.collections.asr.models import ASRModel
from pydub import AudioSegment
from transformers.pipelines import pipeline

device = "cuda" if torch.cuda.is_available() else "cpu"
MODEL_NAME = "nvidia/parakeet-tdt-0.6b-v2"

# Load the ASR model once at startup and keep it in eval mode; it is moved
# onto the GPU only for the duration of each transcription request (see
# get_transcripts_and_raw_times below).
model = ASRModel.from_pretrained(model_name=MODEL_NAME)
model.eval()

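# For reference, a minimal direct use of the model outside the UI would look
# like the sketch below ("audio.wav" is a hypothetical 16 kHz mono file):
#
#   out = model.transcribe(["audio.wav"], timestamps=True)
#   print(out[0].text)
#   print(out[0].timestamp["segment"])  # [{'start': ..., 'end': ..., 'segment': ...}, ...]
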
def get_audio_segment(audio_path, start_second, end_second):
    """
    Extract a segment of audio from a given audio file.

    Parameters:
        audio_path (str): Path to the audio file to process
        start_second (float): Start time of the segment in seconds
        end_second (float): End time of the segment in seconds

    Returns:
        tuple or None: A tuple of (frame_rate, samples), where frame_rate (int)
        is the sample rate of the audio and samples (numpy.ndarray) holds the
        audio samples. Returns None if the audio cannot be processed.
    """
    if not audio_path or not Path(audio_path).exists():
        print(f"Warning: Audio path '{audio_path}' not found or invalid for clipping.")
        return None
    try:
        start_ms = int(start_second * 1000)
        end_ms = int(end_second * 1000)

        start_ms = max(0, start_ms)
        if end_ms <= start_ms:
            print(f"Warning: End time ({end_second}s) is not after start time ({start_second}s). Adjusting end time.")
            end_ms = start_ms + 100

        audio = AudioSegment.from_file(audio_path)
        clipped_audio = audio[start_ms:end_ms]

        # Downmix stereo to mono by averaging the two channels.
        samples = np.array(clipped_audio.get_array_of_samples())
        if clipped_audio.channels == 2:
            samples = samples.reshape((-1, 2)).mean(axis=1).astype(samples.dtype)

        frame_rate = clipped_audio.frame_rate
        if frame_rate <= 0:
            print(f"Warning: Invalid frame rate ({frame_rate}) detected for clipped audio.")
            frame_rate = audio.frame_rate

        if samples.size == 0:
            print(f"Warning: Clipped audio resulted in empty samples array ({start_second}s to {end_second}s).")
            return None

        return (frame_rate, samples)
    except FileNotFoundError:
        print(f"Error: Audio file not found at path: {audio_path}")
        return None
    except Exception as e:
        print(f"Error clipping audio {audio_path} from {start_second}s to {end_second}s: {e}")
        return None

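# A quick usage sketch (hypothetical file name): get_audio_segment returns a
# (sample_rate, samples) tuple that can be fed directly to a gr.Audio value:
#
#   rate_and_samples = get_audio_segment("example.wav", 1.5, 3.0)
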
# On Hugging Face ZeroGPU Spaces, @spaces.GPU allocates a GPU for the
# duration of this call; it is a no-op when running elsewhere.
@spaces.GPU
def get_transcripts_and_raw_times(audio_path):
    if not audio_path:
        # gr.Error is an exception: it must be raised (not just constructed)
        # for the message to appear in the UI.
        raise gr.Error("No audio file path provided for transcription.", duration=None)

    original_path_name = Path(audio_path).name
    try:
        gr.Info(f"Loading audio: {original_path_name}", duration=2)
        full_audio = AudioSegment.from_file(audio_path)
    except Exception as load_e:
        raise gr.Error(f"Failed to load audio file {original_path_name}: {load_e}", duration=None) from load_e

    # Parakeet expects 16 kHz mono input, so resample/downmix up front.
    if full_audio.frame_rate != 16000:
        full_audio = full_audio.set_frame_rate(16000)
    if full_audio.channels != 1:
        full_audio = full_audio.set_channels(1)

    # Transcribe in 5-minute chunks to bound peak memory use on long
    # recordings; chunk-local timestamps are shifted to absolute positions.
    chunk_duration_ms = 5 * 60 * 1000
    total_duration_ms = len(full_audio)
    total_chunks = (total_duration_ms + chunk_duration_ms - 1) // chunk_duration_ms

    vis_data = []
    raw_times_data = []

    model.to(device)

    for i, start_ms in enumerate(range(0, total_duration_ms, chunk_duration_ms), start=1):
        end_ms = min(start_ms + chunk_duration_ms, total_duration_ms)
        chunk = full_audio[start_ms:end_ms]

        gr.Info(f"Transcribing chunk {i} of {total_chunks} ({start_ms/1000:.0f}s to {end_ms/1000:.0f}s)...", duration=3)

        # NeMo's transcribe() takes file paths, so each chunk is written to a
        # temporary WAV file that is removed once the chunk is processed.
        with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as temp_wav:
            chunk.export(temp_wav.name, format="wav")
            temp_wav_path = temp_wav.name

        try:
            output = model.transcribe([temp_wav_path], timestamps=True)
            if not output or not output[0].timestamp or 'segment' not in output[0].timestamp:
                continue

            for ts in output[0].timestamp['segment']:
                abs_start = ts['start'] + (start_ms / 1000.0)
                abs_end = ts['end'] + (start_ms / 1000.0)
                vis_data.append([f"{abs_start:.2f}", f"{abs_end:.2f}", ts['segment']])
                raw_times_data.append([abs_start, abs_end])
        except Exception as e:
            gr.Warning(f"Chunk {i} failed: {e}", duration=3)
        finally:
            os.remove(temp_wav_path)

    # Release the GPU and free memory between requests.
    model.cpu()
    gc.collect()
    if device == "cuda":
        torch.cuda.empty_cache()

    # Offer the full transcript as a downloadable CSV.
    button_update = gr.DownloadButton(visible=False)
    try:
        csv_headers = ["Start (s)", "End (s)", "Segment"]
        temp_csv_file = tempfile.NamedTemporaryFile(delete=False, suffix=".csv", mode='w', newline='', encoding='utf-8')
        writer = csv.writer(temp_csv_file)
        writer.writerow(csv_headers)
        writer.writerows(vis_data)
        csv_file_path = temp_csv_file.name
        temp_csv_file.close()
        button_update = gr.DownloadButton(value=csv_file_path, visible=True)
    except Exception as csv_e:
        # Transcription succeeded even if the CSV could not be written,
        # so warn rather than abort.
        gr.Warning(f"Failed to create transcript CSV file: {csv_e}", duration=None)

    gr.Info("Transcription complete.", duration=2)
    return vis_data, raw_times_data, audio_path, button_update

# Segment playback is pure pydub/CPU work, so it runs without @spaces.GPU.
def play_segment(evt: gr.SelectData, raw_ts_list, current_audio_path):
""" |
|
|
Play a selected segment from the transcription results. |
|
|
Parameters: |
|
|
evt (gr.SelectData): Gradio select event containing the index of selected segment |
|
|
raw_ts_list (list): List of [start, end] timestamps for all segments |
|
|
current_audio_path (str): Path to the current audio file being processed |
|
|
Returns: |
|
|
gr.Audio: Gradio Audio component containing the selected segment for playback |
|
|
Notes: |
|
|
- Extracts and plays the audio segment corresponding to the selected transcription |
|
|
- Returns None if segment extraction fails or inputs are invalid |
|
|
""" |
|
|
if not isinstance(raw_ts_list, list): |
|
|
print(f"Warning: raw_ts_list is not a list ({type(raw_ts_list)}). Cannot play segment.") |
|
|
return gr.Audio(value=None, label="Selected Segment") |
|
|
|
|
|
if not current_audio_path: |
|
|
print("No audio path available to play segment from.") |
|
|
return gr.Audio(value=None, label="Selected Segment") |
|
|
|
|
|
selected_index = evt.index[0] |
|
|
|
|
|
if selected_index < 0 or selected_index >= len(raw_ts_list): |
|
|
print(f"Invalid index {selected_index} selected for list of length {len(raw_ts_list)}.") |
|
|
return gr.Audio(value=None, label="Selected Segment") |
|
|
|
|
|
if not isinstance(raw_ts_list[selected_index], (list, tuple)) or len(raw_ts_list[selected_index]) != 2: |
|
|
print(f"Warning: Data at index {selected_index} is not in the expected format [start, end].") |
|
|
return gr.Audio(value=None, label="Selected Segment") |
|
|
|
|
|
start_time_s, end_time_s = raw_ts_list[selected_index] |
|
|
|
|
|
print(f"Attempting to play segment: {current_audio_path} from {start_time_s:.2f}s to {end_time_s:.2f}s") |
|
|
|
|
|
segment_data = get_audio_segment(current_audio_path, start_time_s, end_time_s) |
|
|
|
|
|
if segment_data: |
|
|
print("Segment data retrieved successfully.") |
|
|
return gr.Audio(value=segment_data, autoplay=True, label=f"Segment: {start_time_s:.2f}s - {end_time_s:.2f}s", interactive=False) |
|
|
else: |
|
|
print("Failed to get audio segment data.") |
|
|
return gr.Audio(value=None, label="Selected Segment") |
|
|
|
|
|
article = (
    "<div style='font-size: 1.1em;'>"
    "<p>AudioDog uses <code><a href='https://huggingface.co/nvidia/parakeet-tdt-0.6b-v2'>parakeet-tdt-0.6b-v2</a></code>.</p>"
    "<p><strong style='color: red; font-size: 1.2em;'>Key Features:</strong></p>"
    "<ul>"
    "<li>Automatic punctuation and capitalization</li>"
    "<li>Accurate segment timestamps (click a row in the table below to play that segment!)</li>"
    "<li>Handles long recordings efficiently by transcribing them in smaller chunks and stitching the results back together.</li>"
    "<li>Accepts MP3 and other common audio formats; works well on audio from downloaded YouTube videos.</li>"
    "</ul>"
    "</div>"
)

# NVIDIA-green color ramp built around the brand color #76B900.
nvidia_theme = gr_themes.Default(
    primary_hue=gr_themes.Color(
        c50="#E5F1D9",
        c100="#CEE3B3",
        c200="#B5D58C",
        c300="#9CC766",
        c400="#84B940",
        c500="#76B900",
        c600="#68A600",
        c700="#5A9200",
        c800="#4C7E00",
        c900="#3E6A00",
        c950="#2F5600"
    ),
    neutral_hue="gray",
    font=[gr_themes.GoogleFont("Inter"), "ui-sans-serif", "system-ui", "sans-serif"],
)

def get_full_transcript(vis_data):
    """Join the text of every transcribed segment into a single string."""
    if not vis_data:
        return ""
    return " ".join([row[2] for row in vis_data if len(row) == 3])

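# --- Optional transcript summarization ---
# The UI below includes a "Summarize Transcript" button; this is a minimal
# sketch of a handler for it, using the `pipeline` import above. The
# checkpoint name ("sshleifer/distilbart-cnn-12-6"), the input cap, and the
# length limits are illustrative assumptions, not requirements.
summarizer = None


def summarize_transcript(transcript):
    global summarizer
    if not transcript or not transcript.strip():
        gr.Warning("Nothing to summarize yet. Transcribe some audio first.", duration=3)
        return ""
    if summarizer is None:
        # Lazy-load so the app starts quickly even if summarization is never used.
        summarizer = pipeline("summarization", model="sshleifer/distilbart-cnn-12-6")
    # Truncate long transcripts; this checkpoint accepts roughly 1024 input tokens.
    result = summarizer(transcript[:4000], max_length=150, min_length=30, do_sample=False, truncation=True)
    return result[0]["summary_text"]
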
with gr.Blocks(theme=nvidia_theme) as demo:
    model_display_name = MODEL_NAME.split('/')[-1] if '/' in MODEL_NAME else MODEL_NAME

    description_html = f"""
    <div style='display: flex; align-items: flex-start;'>
        <img src='https://huggingface.co/spaces/LT4Ryan/AudioDog/resolve/main/pics/AD.jpg' style='width: 75%; max-width: 300px; margin-right: 20px; float: left;' alt='AudioDog logo'>
        <div>
            <h1 style='text-align: left;'>AudioDog, powered by {model_display_name}</h1>
            {article}
        </div>
    </div>
    """

    with gr.Row():
        with gr.Column(scale=2):
            gr.HTML(description_html)
            with gr.Tabs():
                with gr.TabItem("Audio File"):
                    file_input = gr.Audio(sources=["upload"], type="filepath", label="Upload Audio File")
                    file_transcribe_btn = gr.Button("Transcribe Uploaded File", variant="primary")
                with gr.TabItem("Microphone"):
                    mic_input = gr.Audio(sources=["microphone"], type="filepath", label="Record Audio")
                    mic_transcribe_btn = gr.Button("Transcribe Microphone Input", variant="primary")

        with gr.Column(scale=1):
            transcript_box = gr.Textbox(label="Full Transcript", lines=15, interactive=False)

    current_audio_path_state = gr.State(None)
    raw_timestamps_list_state = gr.State([])
    vis_data_state = gr.State([])
    transcript_state = gr.State("")

    with gr.Row():
        with gr.Column(scale=2):
            gr.Markdown("---")
            gr.Markdown("<p><strong style='color: #FF0000; font-size: 1.2em;'>Transcription Results (Click row to play segment)</strong></p>")
            download_btn = gr.DownloadButton(label="Download Transcript (CSV)", visible=False)
            vis_timestamps_df = gr.DataFrame(
                headers=["Start (s)", "End (s)", "Segment"],
                datatype=["number", "number", "str"],
                wrap=True,
                label="Transcription Segments"
            )
            selected_segment_player = gr.Audio(label="Selected Segment", interactive=False)

        with gr.Column(scale=1):
            summary_btn = gr.Button("Summarize Transcript", variant="primary")
            summary_box = gr.Textbox(label="Summary", lines=5, interactive=False)

    def handle_transcribe(audio_path):
        vis_data, raw_times, audio_path, download_btn_obj = get_transcripts_and_raw_times(audio_path)
        transcript = get_full_transcript(vis_data)
        return vis_data, raw_times, audio_path, download_btn_obj, vis_data, transcript

    mic_transcribe_btn.click(
        fn=handle_transcribe,
        inputs=[mic_input],
        outputs=[vis_timestamps_df, raw_timestamps_list_state, current_audio_path_state, download_btn, vis_data_state, transcript_box],
        api_name="transcribe_mic"
    )

    file_transcribe_btn.click(
        fn=handle_transcribe,
        inputs=[file_input],
        outputs=[vis_timestamps_df, raw_timestamps_list_state, current_audio_path_state, download_btn, vis_data_state, transcript_box],
        api_name="transcribe_file"
    )

    vis_timestamps_df.select(
        fn=play_segment,
        inputs=[raw_timestamps_list_state, current_audio_path_state],
        outputs=[selected_segment_player],
    )
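
    # Wire the "Summarize Transcript" button to the sketch handler defined
    # above the UI; it summarizes whatever the transcript box currently shows.
    summary_btn.click(
        fn=summarize_transcript,
        inputs=[transcript_box],
        outputs=[summary_box],
        api_name="summarize_transcript"
    )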


if __name__ == "__main__":
    print("Launching AudioDog...")
    demo.queue()
    demo.launch()