Update app.py

app.py CHANGED
@@ -1,7 +1,12 @@
 import gradio as gr
 import whisper
+import torch
 import os
 from pydub import AudioSegment
+from transformers import pipeline
+
+# Ensure compatible versions of torch and transformers are installed
+# Run: pip install torch==1.13.1 transformers==4.26.1
 
 # Mapping of model names to Whisper model sizes
 MODELS = {
@@ -12,6 +17,15 @@ MODELS = {
     "Large (Most Accurate)": "large"
 }
 
+# Fine-tuned models for specific languages
+FINE_TUNED_MODELS = {
+    "Tamil": {
+        "model": "vasista22/whisper-tamil-medium",
+        "language": "ta"
+    },
+    # Add more fine-tuned models for other languages here
+}
+
 # Mapping of full language names to language codes
 LANGUAGE_NAME_TO_CODE = {
     "Auto Detect": "Auto Detect",
@@ -116,61 +130,55 @@ LANGUAGE_NAME_TO_CODE = {
     "Sundanese": "su",
 }
 
-def detect_language(audio_file):
-    """Detect the language of the audio file."""
-    # Load the Whisper model (use "base" for faster detection)
-    model = whisper.load_model("base")
-
-    # Convert audio to 16kHz mono for better compatibility with Whisper
-    audio = AudioSegment.from_file(audio_file)
-    audio = audio.set_frame_rate(16000).set_channels(1)
-    processed_audio_path = "processed_audio.wav"
-    audio.export(processed_audio_path, format="wav")
-
-    # Detect the language
-    result = model.transcribe(processed_audio_path, task="detect_language", fp16=False)
-    detected_language = result.get("language", "unknown")
-
-    # Clean up processed audio file
-    os.remove(processed_audio_path)
-
-    return f"Detected Language: {detected_language}"
-
 def transcribe_audio(audio_file, language="Auto Detect", model_size="Base (Faster)"):
     """Transcribe the audio file."""
-    # Load the selected Whisper model
-    model = whisper.load_model(MODELS[model_size])
-
-    # Convert audio to 16kHz mono for better compatibility with Whisper
+    # Convert audio to 16kHz mono for better compatibility
     audio = AudioSegment.from_file(audio_file)
     audio = audio.set_frame_rate(16000).set_channels(1)
     processed_audio_path = "processed_audio.wav"
     audio.export(processed_audio_path, format="wav")
 
-    # Transcribe the audio
-    if language == "Auto Detect":
-        result = model.transcribe(processed_audio_path, fp16=False)  # Auto-detect language
-        detected_language = result.get("language", "unknown")
+    # Load the appropriate model
+    if language in FINE_TUNED_MODELS:
+        # Use the fine-tuned Whisper model for the selected language
+        device = "cuda:0" if torch.cuda.is_available() else "cpu"
+        transcribe = pipeline(
+            task="automatic-speech-recognition",
+            model=FINE_TUNED_MODELS[language]["model"],
+            chunk_length_s=30,
+            device=device
+        )
+        transcribe.model.config.forced_decoder_ids = transcribe.tokenizer.get_decoder_prompt_ids(
+            language=FINE_TUNED_MODELS[language]["language"],
+            task="transcribe"
+        )
+        result = transcribe(processed_audio_path)
+        transcription = result["text"]
+        detected_language = language
     else:
-        language_code = LANGUAGE_NAME_TO_CODE.get(language, "en")  # Default to English if not found
-        result = model.transcribe(processed_audio_path, language=language_code, fp16=False)
-        detected_language = language_code
+        # Use the selected Whisper model
+        model = whisper.load_model(MODELS[model_size])
+
+        # Transcribe the audio
+        if language == "Auto Detect":
+            result = model.transcribe(processed_audio_path, fp16=False)  # Auto-detect language
+            detected_language = result.get("language", "unknown")
+        else:
+            language_code = LANGUAGE_NAME_TO_CODE.get(language, "en")  # Default to English if not found
+            result = model.transcribe(processed_audio_path, language=language_code, fp16=False)
+            detected_language = language_code
+
+        transcription = result["text"]
 
     # Clean up processed audio file
     os.remove(processed_audio_path)
 
     # Return transcription and detected language
-    return f"Detected Language: {detected_language}\n\nTranscription:\n{result['text']}"
+    return f"Detected Language: {detected_language}\n\nTranscription:\n{transcription}"
 
 # Define the Gradio interface
 with gr.Blocks() as demo:
-    gr.Markdown("# Audio Transcription")
-
-    with gr.Tab("Detect Language"):
-        gr.Markdown("Upload an audio file to detect its language.")
-        detect_audio_input = gr.Audio(type="filepath", label="Upload Audio File")
-        detect_language_output = gr.Textbox(label="Detected Language")
-        detect_button = gr.Button("Detect Language")
+    gr.Markdown("# Audio Transcription with Fine-Tuned Models")
 
     with gr.Tab("Transcribe Audio"):
         gr.Markdown("Upload an audio file, select a language (or choose 'Auto Detect'), and choose a model for transcription.")
@@ -183,13 +191,24 @@ with gr.Blocks() as demo:
         model_dropdown = gr.Dropdown(
             choices=list(MODELS.keys()),  # Model options
             label="Select Model",
-            value="Base (Faster)"  # Default to "Base" model
+            value="Base (Faster)",  # Default to "Base" model
+            interactive=True  # Allow model selection by default
         )
         transcribe_output = gr.Textbox(label="Transcription and Detected Language")
         transcribe_button = gr.Button("Transcribe Audio")
 
-    # Link buttons to functions
-    detect_button.click(detect_language, inputs=detect_audio_input, outputs=detect_language_output)
+    # Update model dropdown based on language selection
+    def update_model_dropdown(language):
+        if language in FINE_TUNED_MODELS:
+            # Add "Fine-Tuned Model" to the dropdown choices and disable it
+            return gr.Dropdown(choices=["Fine-Tuned Model"], value="Fine-Tuned Model", interactive=False)
+        else:
+            # Reset the dropdown to standard Whisper models
+            return gr.Dropdown(choices=list(MODELS.keys()), value="Base (Faster)", interactive=True)
+
+    language_dropdown.change(update_model_dropdown, inputs=language_dropdown, outputs=model_dropdown)
+
+    # Link button to function
    transcribe_button.click(transcribe_audio, inputs=[transcribe_audio_input, language_dropdown, model_dropdown], outputs=transcribe_output)
 
     # Launch the Gradio interface
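With this change, transcribe_audio routes any language listed in FINE_TUNED_MODELS to the Hugging Face pipeline and everything else to stock Whisper. A minimal smoke test of the three paths, assuming a local file named "sample.wav" and using only the dropdown labels visible in this diff:

# Hypothetical smoke test; "sample.wav" is an assumed local audio file.
print(transcribe_audio("sample.wav"))  # "Auto Detect" path: Whisper picks the language
print(transcribe_audio("sample.wav", language="Sundanese",
                       model_size="Large (Most Accurate)"))  # forced language code "su"
print(transcribe_audio("sample.wav", language="Tamil"))  # fine-tuned pipeline path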
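Note that the pip install line in the new header comment is documentation only; on a Space the pins take effect through requirements.txt. A sketch of a matching requirements.txt, assuming the two versions pinned in app.py and leaving the remaining imports unpinned (pydub additionally needs ffmpeg, which Spaces installs via packages.txt):

torch==1.13.1
transformers==4.26.1
openai-whisper
pydub
gradio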