Aybee5 committed
Commit bf96c87 · verified · 1 Parent(s): 495c7eb

Upload README.md with huggingface_hub

Files changed (1): README.md (+83 -19)

README.md CHANGED
@@ -59,23 +59,37 @@ Each example contains:

  ### Recommended: Download All Files First

- To ensure all audio files are available, download the entire dataset first:

  ```python
- from huggingface_hub import snapshot_download, login
  from datasets import load_dataset, Audio
  import os

- # Login to HuggingFace (to avoid rate limits)
- # Get your token from: https://huggingface.co/settings/tokens
- login()  # Will prompt for token in Colab, or use: login(token="your_token_here")

  # Download entire dataset (parquet + all audio files)
- print("Downloading dataset (~2GB)...")
  local_dir = snapshot_download(
      "Aybee5/HausaTTSEmbed",
      repo_type="dataset",
-     local_dir="hausa_tts_data"
  )

  # Load from downloaded files
@@ -99,34 +113,76 @@ print(f"Text: {sample['text']}")
99
  print(f"Audio shape: {sample['audio']['array'].shape}")
100
  ```
101
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
102
  ### For Unsloth TTS Training (Complete Code)
103
 
104
  Use this complete code in your Unsloth/Colab notebook:
105
 
106
  ```python
107
- from huggingface_hub import snapshot_download
108
  from datasets import load_dataset, Audio
109
  import os
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
110
 
111
- # Step 1: Download entire dataset
112
- print("Downloading Hausa TTS dataset...")
113
  local_dir = snapshot_download(
114
  "Aybee5/HausaTTSEmbed",
115
  repo_type="dataset",
116
- local_dir="/content/hausa_tts" # Use /content/ for Colab
 
 
 
117
  )
118
 
119
- # Step 2: Load from downloaded files
 
 
120
  raw_ds = load_dataset(
121
  "parquet",
122
  data_files=f"{local_dir}/data/*.parquet",
123
  split="train"
124
  )
125
 
126
- # Step 3: Fix audio paths
127
  raw_ds = raw_ds.map(lambda x: {"audio": os.path.join(local_dir, x["audio"]), **x})
128
 
129
- # Step 4: Speaker handling (Unsloth's exact code)
130
  speaker_key = "source"
131
  if "source" not in raw_ds.column_names and "speaker_id" not in raw_ds.column_names:
132
  print("Unsloth: No speaker found, adding default source")
@@ -135,22 +191,30 @@ if "source" not in raw_ds.column_names and "speaker_id" not in raw_ds.column_nam
135
  elif "source" not in raw_ds.column_names and "speaker_id" in raw_ds.column_names:
136
  speaker_key = "speaker_id"
137
 
138
- # Step 5: Resample to target sample rate
139
  target_sampling_rate = 24000
140
  raw_ds = raw_ds.cast_column("audio", Audio(sampling_rate=target_sampling_rate))
141
 
142
  print(f"✓ Dataset ready: {len(raw_ds)} samples")
 
143
 
144
- # Step 6: Optional - Split into train/validation
145
  split_ds = raw_ds.train_test_split(test_size=0.1, seed=42)
146
  train_ds = split_ds['train']
147
  val_ds = split_ds['test']
148
 
149
- # Step 7: Continue with your Unsloth training!
150
- # processed_ds = raw_ds.map(preprocess_example, ...)
 
 
151
  ```
152
 
153
- This will work without FileNotFoundError! 🎉
 
 
 
 
 
154
 
155
  ### With Transformers
156
 
 
README.md (updated section):

  ### Recommended: Download All Files First

+ To ensure all audio files are available and avoid rate limits, authenticate first:

  ```python
+ from huggingface_hub import snapshot_download, HfApi
  from datasets import load_dataset, Audio
  import os
+ import time

+ # IMPORTANT: Login FIRST and WAIT for confirmation
+ # Method 1: Using token directly (RECOMMENDED for Colab)
+ from huggingface_hub import login
+ HF_TOKEN = "hf_YourTokenHere"  # Get from https://huggingface.co/settings/tokens
+ login(token=HF_TOKEN)
+
+ # Verify login worked
+ api = HfApi()
+ user_info = api.whoami(token=HF_TOKEN)
+ print(f"✓ Logged in as: {user_info['name']}")
+
+ # Small delay to ensure auth propagates
+ time.sleep(2)

  # Download entire dataset (parquet + all audio files)
+ print("\nDownloading dataset (~2GB)...")
  local_dir = snapshot_download(
      "Aybee5/HausaTTSEmbed",
      repo_type="dataset",
+     local_dir="hausa_tts_data",
+     token=HF_TOKEN,  # Pass token explicitly
+     max_workers=1,  # Reduce concurrent requests to avoid rate limits
+     resume_download=True  # Resume if interrupted
  )

  # Load from downloaded files

  print(f"Audio shape: {sample['audio']['array'].shape}")
  ```
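To spot-check a clip right after loading, you can play one example in the notebook. A minimal sketch, assuming a Colab/Jupyter environment with IPython and reusing the `sample` from the block above:

```python
# Listen to one example in a notebook (IPython assumed, e.g. Colab)
from IPython.display import Audio

# After the Audio(...) cast, sample["audio"] is a decoded dict with
# "array" (numpy samples) and "sampling_rate"
Audio(sample["audio"]["array"], rate=sample["audio"]["sampling_rate"])
```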
+ **Alternative: Interactive Login (prompts for token)**
+
+ ```python
+ from huggingface_hub import login, snapshot_download
+ import time
+
+ # This will prompt you to paste your token
+ login()
+ time.sleep(2)  # Wait for auth to propagate
+
+ # Then download
+ local_dir = snapshot_download(
+     "Aybee5/HausaTTSEmbed",
+     repo_type="dataset",
+     local_dir="hausa_tts_data",
+     max_workers=1  # Reduce concurrent requests
+ )
+ ```
+
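Whichever login route you take, it is worth confirming that the snapshot actually contains the parquet shards and audio before training. A quick check; the `.wav` extension is an assumption about how the clips are stored:

```python
# Sanity-check the downloaded snapshot (adjust the extension if the
# audio is stored as .mp3 or .flac instead of .wav)
from pathlib import Path

root = Path(local_dir)
n_parquet = len(list(root.glob("data/*.parquet")))
n_audio = len(list(root.rglob("*.wav")))
print(f"parquet shards: {n_parquet}, audio files: {n_audio}")
```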
  ### For Unsloth TTS Training (Complete Code)

  Use this complete code in your Unsloth/Colab notebook:

  ```python
+ from huggingface_hub import snapshot_download, login, HfApi
  from datasets import load_dataset, Audio
  import os
+ import time
+
+ # ==================== STEP 1: AUTHENTICATE ====================
+ # Replace with your actual token from https://huggingface.co/settings/tokens
+ HF_TOKEN = "hf_YourTokenHere"
+
+ print("Authenticating with HuggingFace...")
+ login(token=HF_TOKEN)
+
+ # Verify authentication
+ api = HfApi()
+ user_info = api.whoami(token=HF_TOKEN)
+ print(f"✓ Logged in as: {user_info['name']}\n")
+
+ # Wait for auth to propagate
+ time.sleep(2)
+
+ # ==================== STEP 2: DOWNLOAD DATASET ====================
+ print("Downloading Hausa TTS dataset (~2GB)...")
+ print("Using reduced concurrency to avoid rate limits...\n")

  local_dir = snapshot_download(
      "Aybee5/HausaTTSEmbed",
      repo_type="dataset",
+     local_dir="/content/hausa_tts",  # Use /content/ for Colab
+     token=HF_TOKEN,  # Pass token explicitly
+     max_workers=1,  # Single threaded to avoid rate limits
+     resume_download=True
  )

+ print(f"✓ Downloaded to: {local_dir}\n")
+
+ # ==================== STEP 3: LOAD DATASET ====================
  raw_ds = load_dataset(
      "parquet",
      data_files=f"{local_dir}/data/*.parquet",
      split="train"
  )

+ # ==================== STEP 4: FIX AUDIO PATHS ====================
  raw_ds = raw_ds.map(lambda x: {**x, "audio": os.path.join(local_dir, x["audio"])})

+ # ==================== STEP 5: HANDLE SPEAKERS ====================
  speaker_key = "source"
  if "source" not in raw_ds.column_names and "speaker_id" not in raw_ds.column_names:
      print("Unsloth: No speaker found, adding default source")

  elif "source" not in raw_ds.column_names and "speaker_id" in raw_ds.column_names:
      speaker_key = "speaker_id"

+ # ==================== STEP 6: RESAMPLE AUDIO ====================
  target_sampling_rate = 24000
  raw_ds = raw_ds.cast_column("audio", Audio(sampling_rate=target_sampling_rate))

  print(f"✓ Dataset ready: {len(raw_ds)} samples")
+ print(f"✓ Speaker column: {speaker_key}\n")

+ # ==================== STEP 7: OPTIONAL SPLIT ====================
  split_ds = raw_ds.train_test_split(test_size=0.1, seed=42)
  train_ds = split_ds['train']
  val_ds = split_ds['test']

+ print(f"✓ Train: {len(train_ds)} samples")
+ print(f"✓ Validation: {len(val_ds)} samples")
+
+ # Continue with your Unsloth training!
  ```
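The previous revision of this section ended with a call like `processed_ds = raw_ds.map(preprocess_example, ...)`. The real `preprocess_example` comes from your Unsloth notebook; purely as a hypothetical placeholder, a version that prefixes each transcript with its speaker tag could look like:

```python
# Hypothetical preprocess_example, NOT Unsloth's actual function;
# substitute the preprocessing your notebook defines (tokenization, etc.)
def preprocess_example(example):
    # Condition the model on the speaker by prepending its tag
    example["text"] = f"{example[speaker_key]}: {example['text']}"
    return example

processed_train = train_ds.map(preprocess_example)
processed_val = val_ds.map(preprocess_example)
```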

+ **Key Changes to Avoid Rate Limits:**
+ 1. ✅ Pass `token=HF_TOKEN` explicitly to `snapshot_download()`
+ 2. ✅ Set `max_workers=1` to reduce concurrent requests
+ 3. ✅ Add `time.sleep(2)` after login to ensure auth propagates
+ 4. ✅ Verify authentication with `api.whoami()` before downloading
+ 5. ✅ Use `resume_download=True` to handle interruptions
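If downloads still fail with HTTP 429 errors on a busy connection, a retry loop with exponential backoff around `snapshot_download()` is one more option. A minimal sketch, assuming the `HfHubHTTPError` exception exported by `huggingface_hub.utils` in recent versions:

```python
# Retry snapshot_download with exponential backoff on rate-limit errors
import time
from huggingface_hub import snapshot_download
from huggingface_hub.utils import HfHubHTTPError

local_dir = None
for attempt in range(5):
    try:
        local_dir = snapshot_download(
            "Aybee5/HausaTTSEmbed",
            repo_type="dataset",
            local_dir="hausa_tts_data",
            max_workers=1,
        )
        break  # success
    except HfHubHTTPError as err:
        wait = 10 * 2 ** attempt  # 10s, 20s, 40s, ...
        print(f"Download failed ({err}); retrying in {wait}s")
        time.sleep(wait)
```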
 
  ### With Transformers