!pip install -U git+https://github.com/openai/whisper.git
!pip install -U gradio
!pip install torch
!pip install numpy
!pip install ffmpeg-python
!pip install tqdm
!pip install jiwer 

import os
import asyncio
import logging
import math
import re
from pathlib import Path

import ffmpeg
import gradio as gr
import torch
import whisper
from tqdm.notebook import tqdm

# --- File Handling ---
# Define paths and constants
TEMP_FOLDER = '/content/temp/'
SUPPORTED_AUDIO_FORMATS = ['.mp3', '.wav', '.aac', '.flac', '.ogg', '.m4a', '.amr', '.wma']
SUPPORTED_VIDEO_FORMATS = ['.mp4', '.avi', '.mov', '.wmv', '.mkv', '.webm', '.3gp']
SUPPORTED_FORMATS = SUPPORTED_AUDIO_FORMATS + SUPPORTED_VIDEO_FORMATS

def create_folders():
    """Creates the necessary temporary folder if it doesn't exist."""
    Path(TEMP_FOLDER).mkdir(parents=True, exist_ok=True)

def is_supported_format(file):
    """Checks whether a file path has a supported audio/video extension."""
    if file is None:
        return False
    return any(file.lower().endswith(ext) for ext in SUPPORTED_FORMATS)
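
# Illustrative behaviour (hypothetical paths): any supported extension passes,
# case-insensitively, and a missing file is rejected.
#   is_supported_format('/content/talk.MP3')  -> True
#   is_supported_format('/content/notes.txt') -> False
#   is_supported_format(None)                 -> False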

def convert_to_wav(original_file_path):
    """Converts input file to WAV format."""
    output_path = os.path.join(TEMP_FOLDER, os.path.splitext(os.path.basename(original_file_path))[0] + '.wav')
    try:
        (
            ffmpeg
            .input(original_file_path)  
            .output(output_path, acodec='pcm_s16le', ac=1, ar='16k')
            .overwrite_output()
            .run(capture_stdout=True, capture_stderr=True)
        )
        return output_path
    except ffmpeg.Error as e:
        print(f'Error converting {original_file_path}: {e.stderr.decode()}')
        return None
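
# The conversion above is roughly equivalent to this ffmpeg invocation
# (illustrative filename; 16 kHz mono 16-bit PCM matches the format Whisper
# resamples to internally):
#   ffmpeg -y -i input.mp4 -acodec pcm_s16le -ac 1 -ar 16000 /content/temp/input.wav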

def delete_temp_file(file_path):
    """Deletes a temporary file."""
    if os.path.exists(file_path):
        os.remove(file_path)

# --- Transcription ---
class WhisperModelCache:
    """Singleton class to load and cache the Whisper model."""

    _instance = None

    @staticmethod
    def get_instance():
        """Get the singleton instance."""
        if WhisperModelCache._instance is None:
            WhisperModelCache._instance = WhisperModelCache()
        return WhisperModelCache._instance

    def __init__(self):
        self.model = None
        self.device = None

    def load_model(self):
        """Loads the Whisper model, prioritizing GPU and handling memory."""
        if self.model is None:
            self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
            logging.info(f"Using device: {self.device}")

            model_size = "large-v2" if torch.cuda.is_available() else "medium"
            logging.info(f"Loading Whisper model: {model_size}")

            try:
                self.model = whisper.load_model(model_size, device=self.device)
            except RuntimeError as e:
                if "out of memory" in str(e):
                    logging.error(f"Error: {e}")
                    logging.warning("Falling back to 'medium' model size due to memory constraints.")
                    self.model = whisper.load_model("medium", device=self.device)
                else:
                    raise e

        return self.model

    def unload_model(self):
        """Unloads the model and clears CUDA cache."""
        if self.model is not None:
            del self.model
            self.model = None
        if torch.cuda.is_available():
            torch.cuda.empty_cache()
        logging.info("Model unloaded and CUDA cache cleared.")

async def transcribe_audio(audio_path, language, progress_bar, 
                          task='transcribe', initial_prompt=None, 
                          temperature=0.5, chunk_duration=30):
    """Transcribes audio using Whisper, handling chunking and errors."""

    try:
        # Whisper expects language=None for auto-detection, not the string 'auto'.
        if language == 'auto':
            language = None

        model = WhisperModelCache.get_instance().load_model()

        probe = ffmpeg.probe(audio_path)
        total_duration = float(probe['format']['duration'])
        num_chunks = math.ceil(total_duration / chunk_duration)
        progress_per_chunk = 20 / num_chunks  # transcription owns 20% of the overall bar

        full_transcription = ""

        for chunk_idx in range(num_chunks):
            start_time = chunk_idx * chunk_duration
            end_time = min((chunk_idx + 1) * chunk_duration, total_duration)
            temp_chunk_path = os.path.join(TEMP_FOLDER, f"temp_chunk_{chunk_idx}.wav")

            try: 
                (
                    ffmpeg
                    .input(audio_path)
                    .filter('atrim', start=start_time, end=end_time)
                    .output(temp_chunk_path, acodec='pcm_s16le', ac=1, ar='16k')
                    .overwrite_output()
                    .run(capture_stdout=True, capture_stderr=True)
                )
            except ffmpeg.Error as e:
                logging.error(f"Error extracting audio chunk: {e.stderr.decode()}")
                return "Error: Could not extract audio chunk for transcription"

            result = await asyncio.to_thread(model.transcribe, temp_chunk_path,
                                            language=language,
                                            task=task,
                                            initial_prompt=initial_prompt,
                                            temperature=temperature)

            full_transcription += result['text'] 

            progress_bar.update(progress_per_chunk)
            delete_temp_file(temp_chunk_path)

        return full_transcription

    except Exception as e:
        logging.error(f"Error transcribing {audio_path}: {str(e)}")
        return f"Error during transcription: {str(e)}"

# --- Anonymization --- 
def anonymize_text(text):
    """Anonymizes personal information in text using naive regex patterns.

    Matches capitalized first/last name pairs, email addresses, and US-style
    phone numbers. Regexes like these both over-match (any two adjacent
    capitalized words) and under-match; a dedicated PII/NER tool would be
    more reliable in production.
    """
    text = re.sub(r'\b[A-Z][a-z]+ [A-Z][a-z]+\b|\S+@\S+|\d{3}[-.]?\d{3}[-.]?\d{4}',
                  lambda m: '[NAME]' if re.match(r'\b[A-Z][a-z]+ [A-Z][a-z]+\b', m.group()) else
                            '[EMAIL]' if '@' in m.group() else '[PHONE]',
                  text)
    return text
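
# Example (illustrative, traced by hand against the patterns above):
#   anonymize_text("please email John Smith at john@example.com or 555-123-4567")
#   -> "please email [NAME] at [EMAIL] or [PHONE]"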

# --- Gradio UI ---
async def process_audio(file, language, anonymize, task="Transcribe",
                        initial_prompt=None, temperature=0.5):
    """Processes audio: validation, conversion, transcription, anonymization, cleanup."""
    try:
        if file is None:
            return "Error: Please upload an audio or video file."

        if not is_supported_format(file):  
            raise ValueError(f"Unsupported file format: {file}")

        progress_bar = tqdm(total=100, desc="Overall Process", unit="%", position=0, leave=True) 

        progress_bar.update(10) 

        temp_audio_path = convert_to_wav(file)
        if not temp_audio_path:
            raise ValueError(f"Failed to convert {file} to WAV format.") 

        progress_bar.update(30)  

        transcription = await transcribe_audio(temp_audio_path, language, progress_bar,
                                               task=task.lower(),
                                               initial_prompt=initial_prompt or None,
                                               temperature=temperature)

        progress_bar.update(20)

        delete_temp_file(temp_audio_path)

        if anonymize:
            transcription = anonymize_text(transcription)
            progress_bar.update(10)

        # Fill whatever remains so the bar always finishes at exactly 100%
        # (the fixed increments above would otherwise overshoot the total).
        progress_bar.update(progress_bar.total - progress_bar.n)
        progress_bar.close()

        return transcription

    except Exception as e:
        print(f"Error processing audio: {e}")  
        return f"Error: {str(e)}"  

def create_ui():
  """Create the Gradio UI."""
  language_choices = ["en", "es", "fr", "de", "it", "pt", "nl", "ru", "zh", "ja", "ko", "ar", "he", "auto"]
  output_format_choices = ["txt", "srt", "vtt", "tsv", "json"]

  with gr.Blocks() as interface:
    with gr.Row():
      with gr.Column():
        audio_input = gr.File(label="Upload Audio/Video", type="filepath")

        task_dropdown = gr.Dropdown(
            choices=["Transcribe", "Translate"],
            label="Task", 
            value="Transcribe"
        )

        language_dropdown = gr.Dropdown(
            choices=language_choices, 
            label="Language", 
            value="en", # Default to English
            info="Select 'auto' for automatic language detection."
        )

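        # Note: the output-format and word-timestamp options below are collected
        # by the UI but not yet passed to process_audio; they are placeholders.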
        output_format_checkbox_group = gr.CheckboxGroup(
            choices=output_format_choices,
            label="Output Formats", 
            value=["txt"]
        )

        anonymize_checkbox = gr.Checkbox(label="Anonymize Transcription")

        prompt_input = gr.Textbox(
            label="Initial Prompt",
            lines=2,
            placeholder="Optional prompt to guide transcription"
        ) 

        temperature_slider = gr.Slider(
            minimum=0.0,
            maximum=1.0,
            value=0.5, 
            label="Temperature"
        ) 

        timestamps_checkbox = gr.Checkbox(label="Include Word Timestamps")

        transcribe_button = gr.Button(value="Transcribe")
      with gr.Column():
        transcription_output = gr.Textbox(label="Transcription", lines=10)

    transcribe_button.click(
      fn=process_audio,
      inputs=[audio_input, language_dropdown, anonymize_checkbox,
              task_dropdown, prompt_input, temperature_slider],
      outputs=transcription_output
    )
  return interface 

# --- Main Execution ---
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
    create_folders()
    iface = create_ui()
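    # debug=True keeps the notebook cell attached and surfaces errors inline;
    # share=True asks Gradio for a temporary public URL so the UI is reachable
    # from outside the Colab runtime.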
    iface.launch(debug=True, share=True)