import io

import streamlit as st
import torch
import torchaudio
from transformers import WhisperForConditionalGeneration, WhisperProcessor
|
|
# Run inference on the GPU when one is available, otherwise fall back to CPU.
device = "cuda" if torch.cuda.is_available() else "cpu"

# Fine-tuned Whisper checkpoint on the Hugging Face Hub.
model_path = "Danyal-AI/Whisper-FT_on_indicVoices"
|
|
# The processor bundles the tokenizer and feature extractor for this checkpoint,
# so a single object covers both feature extraction and decoding.
processor = WhisperProcessor.from_pretrained(model_path, language="Hindi", task="transcribe")
model = WhisperForConditionalGeneration.from_pretrained(model_path).to(device)
model.eval()
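# Streamlit reruns this script top to bottom on every interaction, so in a
# deployed app the loading above would usually be cached. A minimal sketch,
# assuming Streamlit >= 1.18 where st.cache_resource is available:
#
#   @st.cache_resource
#   def load_model():
#       processor = WhisperProcessor.from_pretrained(model_path, language="Hindi", task="transcribe")
#       model = WhisperForConditionalGeneration.from_pretrained(model_path).to(device)
#       return processor, model.eval()
#
#   processor, model = load_model()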
|
|
def transcribe_audio(audio_bytes):
    # Decode the uploaded bytes; torchaudio returns (channels, samples) plus the sample rate.
    speech_array, sr = torchaudio.load(io.BytesIO(audio_bytes))
    # Downmix multi-channel audio to mono, which is what Whisper expects.
    if speech_array.shape[0] > 1:
        speech_array = speech_array.mean(dim=0, keepdim=True)
    # Whisper models are trained on 16 kHz audio, so resample anything else.
    if sr != 16000:
        speech_array = torchaudio.transforms.Resample(orig_freq=sr, new_freq=16000)(speech_array)
    # Convert the waveform into log-Mel input features and move them to the model's device.
    input_features = processor(
        speech_array.squeeze(0).numpy(), sampling_rate=16000, return_tensors="pt"
    ).input_features.to(device)
    with torch.no_grad():
        generated_ids = model.generate(input_features)
    transcription = processor.batch_decode(generated_ids, skip_special_tokens=True)
    return transcription[0]
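# The language/task passed to the processor above configure tokenization but do
# not by themselves constrain generation; if the checkpoint's generation config
# does not already pin Hindi, decoding can be steered explicitly with the
# standard transformers API, e.g.:
#
#   forced_decoder_ids = processor.get_decoder_prompt_ids(language="hindi", task="transcribe")
#   generated_ids = model.generate(input_features, forced_decoder_ids=forced_decoder_ids)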
|
|
st.title("Hindi Speech Transcription")
st.write("Upload an audio file to transcribe Hindi speech.")

uploaded_file = st.file_uploader("Choose an audio file...", type=["wav", "mp3", "m4a"])
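# Note on formats: torchaudio decodes WAV out of the box, but mp3/m4a support
# depends on the installed decoding backend (typically FFmpeg); without one,
# those uploads will fail to load.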
|
|
if uploaded_file is not None:
    audio_bytes = uploaded_file.read()
    transcription = transcribe_audio(audio_bytes)
    st.write("Transcription:")
    st.write(transcription)
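# Optional polish (a sketch, not part of the original app): Streamlit's
# st.audio and st.spinner could play the upload back and show progress while
# the model runs, e.g.
#
#   st.audio(audio_bytes)
#   with st.spinner("Transcribing..."):
#       transcription = transcribe_audio(audio_bytes)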
|
|