#!/usr/bin/env python3
"""
Example: using a converted HuBERT CTC model for speech recognition.

Runs single-sample and batched greedy-CTC inference with a model and
processor exported to ./converted_ctc_models.
"""

import torch
from transformers import Wav2Vec2Processor, HubertForCTC
from datasets import load_dataset

# Load the model and processor (feature extractor + CTC tokenizer)
print("Loading model and processor...")
processor = Wav2Vec2Processor.from_pretrained("./converted_ctc_models")
model = HubertForCTC.from_pretrained("./converted_ctc_models")

# Move the model to the GPU if one is available; eval() disables dropout
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
model.eval()

# Load sample audio
print("\nLoading sample audio...")
ds = load_dataset("patrickvonplaten/librispeech_asr_dummy", "clean", split="validation", trust_remote_code=True)
sample = ds[0]

# Process audio
audio_input = sample["audio"]["array"]
sampling_rate = sample["audio"]["sampling_rate"]

# HuBERT/Wav2Vec2 models expect 16 kHz audio; resample if necessary
if sampling_rate != 16000:
    import librosa  # imported lazily: only needed when resampling
    audio_input = librosa.resample(audio_input, orig_sr=sampling_rate, target_sr=16000)
    sampling_rate = 16000

# Prepare the input (the processor normalizes and returns input_values)
inputs = processor(
    audio_input,
    return_tensors="pt",
    sampling_rate=sampling_rate,
).to(device)

# Run inference
print("\nRunning inference...")
with torch.no_grad():
    logits = model(**inputs).logits

# Greedy CTC decoding: argmax per frame, then collapse repeats and blanks
predicted_ids = torch.argmax(logits, dim=-1)
transcription = processor.decode(predicted_ids[0])

print(f"\nTranscription: '{transcription}'")
print(f"Expected: '{sample['text']}'")

# Batch processing example
print("\n\nBatch processing example:")
print("-" * 40)

# Process multiple samples
batch_size = 4
audio_samples = [ds[i]["audio"]["array"] for i in range(batch_size)]

# Pad to the longest sample; padding=True also returns an attention_mask
# so the model ignores padded frames
inputs = processor(
    audio_samples,
    return_tensors="pt",
    padding=True,
    sampling_rate=16000,
).to(device)

# Batch inference
with torch.no_grad():
    logits = model(**inputs).logits

# Greedy-decode every sample in the batch
predicted_ids = torch.argmax(logits, dim=-1)
transcriptions = processor.batch_decode(predicted_ids)

for i, transcription in enumerate(transcriptions):
    print(f"Sample {i+1}: '{transcription}'")