|
|
|
|
|
|
|
|
|
|
|
|
|
|
| from transformers import WhisperProcessor, WhisperForConditionalGeneration
|
| import torch
|
| import soundfile as sf
|
| import librosa
|
|
|
|
|
# Transcribe a Vietnamese audio file with a fine-tuned Whisper checkpoint.
#
# NOTE(review): `soundfile` (sf) is imported at the top of the file but is not
# used here — librosa handles the audio loading. Confirm nothing else needs it
# before removing the import.

# The language/task arguments configure the processor's tokenizer prompts;
# they do NOT by themselves force generation language (see generate() below).
processor = WhisperProcessor.from_pretrained(
    "naminh93/lyric_pho", language="vi", task="transcribe"
)
model = WhisperForConditionalGeneration.from_pretrained("naminh93/lyric_pho")
model.eval()  # inference only; from_pretrained defaults to eval, but be explicit

audio_path = "1.wav"
# Whisper's feature extractor requires 16 kHz mono input; librosa resamples
# to sr=16000 on load and returns a float32 waveform.
audio, sr = librosa.load(audio_path, sr=16000)

# Convert the raw waveform into log-mel spectrogram features ("input_features").
inputs = processor(audio, sampling_rate=16000, return_tensors="pt")

with torch.no_grad():
    # BUG FIX: force Vietnamese transcription at generation time. Without
    # explicit language/task here, multilingual Whisper auto-detects the
    # spoken language and may transcribe (or translate) into the wrong one —
    # the processor-level language="vi" setting does not carry over to generate().
    predicted_ids = model.generate(
        inputs["input_features"], language="vi", task="transcribe"
    )

# Decode token ids back to text; [0] takes the single (batch-size-1) result.
transcription = processor.batch_decode(predicted_ids, skip_special_tokens=True)[0]

print("\n===== KẾT QUẢ NHẬN DẠNG GIỌNG NÓI =====")
print(transcription)
|
|
|