# Pack: archive the cleaned dataset and split it into 4 GB parts for transfer.
cd /home/vladimir_albrekht/projects/2025_sep_22_qwen3omni/ms_swift_training/approach_2_transformers_based/data/fleurs_data
tar -czf - fleurs_cleaned_kk_not_translated | split -b 4G - fleurs_cleaned_kk_not_translated.tar.gz.part

# Unpack: reassemble the parts and extract the archive in place.
cat fleurs_cleaned_kk_not_translated.tar.gz.part* | tar -xzf -
Processing logic (Python): build the cleaned FLEURS kk_kz dataset and its JSONL manifest.
import pandas as pd
from pathlib import Path
def load_fleurs_split(split_name, base_path="/home/vladimir_albrekht/projects/2025_sep_22_qwen3omni/ms_swift_training/approach_2_transformers_based/data/fleurs_data/fleurs/data/kk_kz", sample_rate=16000):
    """Load one FLEURS TSV split into a DataFrame with derived columns.

    Parameters
    ----------
    split_name : str
        Split to load ("train", "dev", or "test"); names both the TSV file
        (``{split_name}.tsv``) and the audio subdirectory.
    base_path : str
        Directory containing the split TSVs and ``audio/{split_name}/``.
    sample_rate : int
        Sample rate in Hz used to convert ``num_samples`` into
        ``duration_seconds`` (FLEURS ships 16 kHz audio).

    Returns
    -------
    pandas.DataFrame
        One row per utterance, with ``duration_seconds``, ``dataset_type``
        and ``audio_dir`` columns appended.
    """
    df = pd.read_csv(
        f"{base_path}/{split_name}.tsv",
        sep="\t",
        header=None,
        names=["id", "file_name", "raw_transcription", "transcription",
               "phonemes", "num_samples", "gender"],
    )
    # Coerce malformed sample counts to NaN instead of raising.
    df['num_samples'] = pd.to_numeric(df['num_samples'], errors='coerce')
    df['duration_seconds'] = df['num_samples'] / sample_rate
    df['dataset_type'] = split_name
    df['audio_dir'] = f"{base_path}/audio/{split_name}"
    return df
# Load every split and stack them into a single table.
split_frames = [load_fleurs_split(name) for name in ("test", "dev", "train")]
df_test, df_dev, df_train = split_frames
df = pd.concat(split_frames, ignore_index=True)
# Keep only clips that fit within the ~30 s limit.
df_cleaned = df.loc[df['duration_seconds'] <= 29.9]

# File-name statistics kept for interactive inspection.
duplicate_files = df_cleaned['file_name'].duplicated().sum()
unique_files = df_cleaned['file_name'].nunique()
total_files = len(df_cleaned)
print(f"Total samples: {total_files}")

# Drop columns we do not ship, then swap the two transcription columns:
# FLEURS 'raw_transcription' becomes our 'transcription' and vice versa.
df_cleaned = df_cleaned.drop(columns=['phonemes', 'num_samples']).rename(
    columns={
        'raw_transcription': 'transcription',
        'transcription': 'raw_transcription',
    }
)
import shutil
from pathlib import Path

# Destination layout: <output_dir>/audios/<split>_<file> plus data.jsonl.
output_dir = Path("/home/vladimir_albrekht/projects/2025_sep_22_qwen3omni/ms_swift_training/approach_2_transformers_based/data/fleurs_data/fleurs_cleaned_kk")
# parents=True also creates missing intermediate directories instead of
# raising FileNotFoundError when any ancestor does not exist yet.
output_dir.mkdir(parents=True, exist_ok=True)
audio_output_dir = output_dir / "audios"
audio_output_dir.mkdir(parents=True, exist_ok=True)
# Copy each clip into the flat audios/ directory (prefixed with its split so
# names from different splits cannot collide) and build one manifest record
# per row.
jsonl_data = []
for _, row in df_cleaned.iterrows():
    split = row['dataset_type']
    renamed = f"{split}_{row['file_name']}"
    source = Path(row['audio_dir']) / row['file_name']
    shutil.copy(source, audio_output_dir / renamed)
    record = {
        "transcription": row['transcription'],
        "audio_path": f"audios/{renamed}",
        "meta_data": {
            "id": row['id'],
            "raw_transcription": row['raw_transcription'],
            "duration_seconds": row['duration_seconds'],
            "gender": row['gender'],
            "dataset_type": split,
        },
    }
    jsonl_data.append(record)
import json

# Write the manifest: one JSON object per line, UTF-8, Cyrillic kept readable
# via ensure_ascii=False.
manifest_path = output_dir / "data.jsonl"
with open(manifest_path, 'w', encoding='utf-8') as fh:
    fh.writelines(json.dumps(rec, ensure_ascii=False) + '\n' for rec in jsonl_data)
print(f"Saved {len(jsonl_data)} samples to {output_dir}")