Bgeorge committed on
Commit
fec55e8
·
verified ·
1 Parent(s): 66c50e6

Update README.md

Browse files

import os
import soundfile as sf
import pandas as pd
import librosa
import io # Import the io module
from datasets import load_dataset

# --- Configuration (Keep these the same) ---
# Hub dataset to convert; its 'audio' feature is stored with decode=False,
# so each example carries raw encoded bytes rather than a decoded array.
DATASET_NAME = "Bgeorge/CNA_AudioDataset"
# Output sample rate for every saved WAV (source is 16 kHz per the README;
# audio is upsampled to 22.05 kHz, a common TTS training rate).
TARGET_SAMPLING_RATE = 22050
# Layout: data/waves/*.wav plus pipe-separated metadata CSVs under data/.
BASE_OUTPUT_DIR = "data"
WAVES_DIR = os.path.join(BASE_OUTPUT_DIR, "waves")
os.makedirs(WAVES_DIR, exist_ok=True)

# --- Load Dataset ---
# Downloads/caches all splits; decoding of audio is deferred (see above).
print("Loading dataset...")
ds = load_dataset(DATASET_NAME)


# --- Revised Processing Function ---
def process_split(split_name, dataset_split):
    """Decode, resample, and save audio for one dataset split.

    Writes one WAV per example into ``WAVES_DIR`` and a pipe-separated,
    headerless metadata CSV (``file_name|text|speaker_id``) into
    ``BASE_OUTPUT_DIR``.

    Args:
        split_name: Label used for logging and the metadata file name.
        dataset_split: Iterable of examples with an ``audio`` dict holding
            raw ``bytes`` (dataset loaded with ``decode=False``), a ``text``
            string, and optionally ``speaker_name``.

    Side effects: writes WAV files and a CSV; prints progress. Examples that
    fail to decode are skipped, not raised.
    """
    print(f"\nProcessing {split_name} split...")
    metadata = []

    for i, example in enumerate(dataset_split):
        # Manual decoding: the 'audio' column is a dict with raw encoded
        # bytes; wrap them in a file-like object and let soundfile decode,
        # bypassing the datasets-internal decoder (torchcodec).
        raw_audio_bytes = example['audio']['bytes']
        audio_stream = io.BytesIO(raw_audio_bytes)

        try:
            audio_array, original_sr = sf.read(audio_stream)
        except Exception as e:
            # Best-effort conversion: report and skip undecodable examples.
            print(f"Skipping example {i} due to decoding error: {e}")
            continue

        # FIX: soundfile returns shape (frames, channels) for multi-channel
        # input, but librosa.resample operates on the *last* axis, so a
        # stereo clip would be "resampled" along the channel axis. Downmix
        # to mono first so both resampling and the WAV output are correct.
        if audio_array.ndim > 1:
            audio_array = audio_array.mean(axis=1)

        text = example['text']
        speaker_id = example.get('speaker_name', 'default_speaker')

        # Resample only when needed; librosa requires the source rate.
        if original_sr != TARGET_SAMPLING_RATE:
            audio_array = librosa.resample(
                audio_array,
                orig_sr=original_sr,
                target_sr=TARGET_SAMPLING_RATE
            )

        # Stable, unique file name per example; CSV stores the path
        # relative to BASE_OUTPUT_DIR.
        file_name = f"{speaker_id}_{i:06d}.wav"
        full_path = os.path.join(WAVES_DIR, file_name)
        relative_path_for_csv = os.path.join("waves", file_name)

        sf.write(full_path, audio_array, TARGET_SAMPLING_RATE, format='WAV')

        metadata.append({
            'file_name': relative_path_for_csv,
            'text': text,
            'speaker_id': speaker_id
        })

        if (i + 1) % 1000 == 0:
            print(f" Processed {i+1} examples...")

    # Pipe-separated, headerless CSV (LJSpeech-style metadata).
    df_metadata = pd.DataFrame(metadata)
    # NOTE(review): produces e.g. "metadatatrain.csv" — possibly meant
    # "metadata_{split_name}.csv"; kept as-is to preserve output paths.
    csv_filename = f"metadata{split_name}.csv"
    csv_path = os.path.join(BASE_OUTPUT_DIR, csv_filename)
    df_metadata.to_csv(csv_path, sep='|', index=False, header=False)

    print(f"Finished processing {len(df_metadata)} examples for {split_name}.")
    print(f"Metadata saved to: {csv_path}")


# --- Drive the conversion over the available splits ---
# Train split is processed when present; for evaluation we prefer 'test'
# and fall back to 'validation'.
if 'train' in ds:
    process_split('train', ds['train'])

held_out = ds.get('test') or ds.get('validation')
if held_out is None:
    print("Warning: Neither 'test' nor 'validation' split found for evaluation.")
else:
    process_split('eval', held_out)

print("\n✅ Dataset conversion complete.")

Files changed (1) hide show
  1. README.md +31 -29
README.md CHANGED
@@ -1,29 +1,31 @@
1
- ---
2
- dataset_info:
3
- features:
4
- - name: audio
5
- dtype:
6
- audio:
7
- sampling_rate: 16000
8
- decode: false
9
- - name: text
10
- dtype: string
11
- - name: speaker_name
12
- dtype: string
13
- splits:
14
- - name: train
15
- num_bytes: 6778898640
16
- num_examples: 30504
17
- - name: test
18
- num_bytes: 750280661
19
- num_examples: 3390
20
- download_size: 7125544218
21
- dataset_size: 7529179301
22
- configs:
23
- - config_name: default
24
- data_files:
25
- - split: train
26
- path: data/train-*
27
- - split: test
28
- path: data/test-*
29
- ---
 
 
 
1
+ ---
2
+ dataset_info:
3
+ features:
4
+ - name: audio
5
+ dtype:
6
+ audio:
7
+ sampling_rate: 16000
8
+ decode: false
9
+ - name: text
10
+ dtype: string
11
+ - name: speaker_name
12
+ dtype: string
13
+ splits:
14
+ - name: train
15
+ num_bytes: 6778898640
16
+ num_examples: 30504
17
+ - name: test
18
+ num_bytes: 750280661
19
+ num_examples: 3390
20
+ download_size: 7125544218
21
+ dataset_size: 7529179301
22
+ configs:
23
+ - config_name: default
24
+ data_files:
25
+ - split: train
26
+ path: data/train-*
27
+ - split: test
28
+ path: data/test-*
29
+ language:
30
+ - ro
31
+ ---