Upload README.md with huggingface_hub
README.md (CHANGED)
@@ -41,8 +41,11 @@ This dataset is derived from [LibriSpeech](https://huggingface.co/datasets/opens
 
 | Feature | Type | Description |
 |---------|------|-------------|
-| `…
+| `audio` | `Audio` | Original audio (FLAC, 16kHz) - playable in HF viewer |
+| `input_values` | `Sequence[float]` | Normalized audio waveform (float32) - ready for training |
 | `labels` | `Sequence[int]` | ARPAbet phoneme token IDs |
+| `text` | `string` | Original text transcription |
+| `duration` | `float` | Audio duration in seconds |
 
 ### ARPAbet Vocabulary (72 tokens)
 
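The five columns in the updated table can be checked after loading, since `datasets` exposes the schema on the loaded split. A minimal sketch, assuming the repo loads with its default config; the tiny `train[:4]` slice is only for illustration:

```python
from datasets import load_dataset

# Small slice: enough to inspect the schema without a full download
ds = load_dataset("davidggphy/librispeech-arpabet-processed", split="train[:4]")

# Expect: audio, input_values, labels, text, duration
print(ds.column_names)
print(ds.features)
```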
@@ -66,9 +69,18 @@ from datasets import load_dataset
 from huggingface_hub import hf_hub_download
 import json
 
-# Load the dataset
+# Load the full dataset
 dataset = load_dataset("davidggphy/librispeech-arpabet-processed")
 
+# Or load a subset (faster for testing)
+dataset = load_dataset("davidggphy/librispeech-arpabet-processed", split="train[:1000]")  # First 1000
+dataset = load_dataset("davidggphy/librispeech-arpabet-processed", split="train[:10%]")  # First 10%
+
+# Streaming mode (no full download)
+stream = load_dataset("davidggphy/librispeech-arpabet-processed", split="train", streaming=True)
+for sample in stream.take(100):
+    print(sample["text"])
+
 # Load vocabulary mapping
 vocab_path = hf_hub_download(
     repo_id="davidggphy/librispeech-arpabet-processed",
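The subset and streaming loaders added above behave the same per sample, so the new `duration` column gives a quick way to gauge how much audio a test slice covers. A minimal sketch, assuming a `datasets` release where `IterableDataset.take` is available:

```python
from datasets import load_dataset

stream = load_dataset(
    "davidggphy/librispeech-arpabet-processed", split="train", streaming=True
)

# Tally duration over the first 100 streamed samples
total_seconds = sum(sample["duration"] for sample in stream.take(100))
print(f"First 100 samples: {total_seconds / 60:.1f} min of audio")
```

Streaming keeps only one sample in memory at a time, which is why the README recommends it when a full download is unwanted.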
@@ -82,11 +94,37 @@ token_to_id = vocab_data["token_to_id"]
 id_to_token = {int(k): v for k, v in vocab_data["id_to_token"].items()}
 
 # Access samples
-sample = dataset[…
-print(f"…
+sample = dataset[0]
+print(f"Text: {sample['text']}")
+print(f"Duration: {sample['duration']:.2f}s")
 print(f"Labels: {[id_to_token[i] for i in sample['labels']]}")
 ```
 
+### Listening to Audio
+
+The `input_values` column contains normalized audio waveforms at 16kHz. To play or save the audio:
+
+```python
+import numpy as np
+import soundfile as sf
+from IPython.display import Audio
+
+sample = dataset["train"][0]
+
+# Convert to numpy array
+audio = np.array(sample["input_values"], dtype=np.float32)
+
+# Play in Jupyter/Colab
+Audio(audio, rate=16000)
+
+# Or save to file
+sf.write("sample.wav", audio, 16000)
+
+# Check duration
+print(f"Duration: {sample['duration']:.2f}s")
+print(f"Text: {sample['text']}")
+```
+
 ### Training with Wav2Vec2
 
 ```python
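The "Listening to Audio" section added above goes through `input_values`; the new `audio` column offers a second route, since an `Audio` feature decodes on access. A minimal sketch, assuming a pre-4.0 `datasets` release where access yields a dict with `array` and `sampling_rate` (newer releases decode through a different object):

```python
import soundfile as sf
from datasets import load_dataset

ds = load_dataset("davidggphy/librispeech-arpabet-processed", split="train[:1]")

sample = ds[0]
# The Audio feature decodes to {"array": ..., "sampling_rate": ..., "path": ...}
waveform = sample["audio"]["array"]
rate = sample["audio"]["sampling_rate"]

sf.write("clip.wav", waveform, rate)
print(f"Saved {len(waveform) / rate:.2f}s clip at {rate} Hz")
```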
|