# morpheus-gpt-training / scripts / create_speaker_json.py
# Uploaded by Imakandi-Labs via huggingface_hub (commit 39da493, verified)
#!/usr/bin/env python3
"""
YarnGPT Speaker JSON Creator
Creates speaker JSON files from audio + transcript for voice cloning.
This script:
1. Loads audio and converts to WavTokenizer codes
2. Uses whisper for forced alignment to segment audio into words
3. Creates speaker JSON files compatible with YarnGPT
"""
import os
import sys
import json
import argparse
import re
from pathlib import Path
def check_dependencies():
    """Check that all required third-party packages are installed.

    Uses ``importlib.util.find_spec`` so that heavy packages (e.g. torch)
    are only probed for presence, not actually imported.

    Returns:
        bool: True if every required package is available; False otherwise
        (the missing package names and an install hint are printed).
    """
    import importlib.util

    required = ['torch', 'torchaudio', 'transformers', 'outetts']
    missing = [pkg for pkg in required if importlib.util.find_spec(pkg) is None]
    if missing:
        print(f"Missing packages: {missing}")
        print("Install with: pip install torch torchaudio transformers outetts")
        return False
    return True
def create_speaker_json(
    audio_path: str,
    text: str,
    output_path: str,
    wav_tokenizer_model_path: str,
    wav_tokenizer_config_path: str,
    language: str = "english"
):
    """
    Create a speaker JSON file from an audio sample.

    The audio is resampled to 24 kHz mono, encoded to WavTokenizer codes,
    and the codes are partitioned evenly across the whitespace-separated
    words of the transcript. For better quality, use forced alignment
    (whisper, wav2vec2-ctc, etc.) instead of the uniform split.

    Args:
        audio_path: Path to reference audio file (WAV)
        text: Transcript of the audio
        output_path: Where to save the speaker JSON
        wav_tokenizer_model_path: Path to WavTokenizer model checkpoint
        wav_tokenizer_config_path: Path to WavTokenizer config
        language: Language of the audio (english, yoruba, igbo, hausa).
            Currently informational only; it does not affect processing.

    Returns:
        dict: The speaker JSON structure that was written to ``output_path``.

    Raises:
        ValueError: If the transcript contains no words.
    """
    import torch
    import torchaudio
    from outetts.wav_tokenizer.decoder import WavTokenizer
    from outetts.wav_tokenizer.encoder.utils import convert_audio

    # Validate the transcript up front: an empty word list would otherwise
    # cause a ZeroDivisionError when splitting codes below.
    words = text.strip().split()
    if not words:
        raise ValueError("Transcript must contain at least one word.")
    num_words = len(words)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"Using device: {device}")

    # Load WavTokenizer
    print("Loading WavTokenizer...")
    wavtokenizer = WavTokenizer.from_pretrained0802(wav_tokenizer_config_path, wav_tokenizer_model_path)
    wavtokenizer = wavtokenizer.to(device)

    # Load and resample audio to 24kHz mono.
    print(f"Loading audio: {audio_path}")
    audio_data, sample_rate = torchaudio.load(audio_path)
    audio_data = audio_data.squeeze().to(dtype=torch.float32)
    if sample_rate != 24000:
        audio_data = audio_data.unsqueeze(0)
        audio_data = convert_audio(audio_data, sample_rate, 24000, 1)
        audio_data = audio_data.squeeze()
    audio = audio_data.unsqueeze(0).to(device)
    if audio.ndim == 3:
        audio = audio.squeeze(1)

    # Encode the waveform to discrete WavTokenizer codes.
    print("Encoding audio to WavTokenizer codes...")
    bandwidth_id = torch.tensor([0]).to(device)
    _, codes = wavtokenizer.encode_infer(audio, bandwidth_id=bandwidth_id)
    codes = codes.squeeze().tolist()
    if isinstance(codes, int):
        # A 0-dim tensor squeezes to a bare scalar; keep a flat list.
        codes = [codes]

    # Total duration from the 24 kHz sample count.
    total_samples = len(audio_data)
    total_duration = total_samples / 24000  # 24kHz sample rate

    # Uniform distribution of codes across words using even boundary
    # partitioning: i*N//W .. (i+1)*N//W. Unlike a fixed codes-per-word
    # count, this never starves early words when there are more words than
    # codes, and never dumps the whole remainder on the last word.
    total_codes = len(codes)
    duration_per_word = total_duration / num_words

    word_data = []
    for i, word in enumerate(words):
        start_idx = i * total_codes // num_words
        end_idx = (i + 1) * total_codes // num_words
        word_codes = codes[start_idx:end_idx]

        # Normalize word for YarnGPT: keep ASCII letters only, falling back
        # to the lowercased original if nothing survives (e.g. "123", "—").
        normalized_word = re.sub(r'[^a-zA-Z]', '', word.lower())
        if not normalized_word:
            normalized_word = word.lower()

        word_data.append({
            "word": normalized_word,
            "duration": f"{duration_per_word:.2f}",
            "codes": word_codes
        })

    # Create speaker JSON
    speaker_json = {
        "text": text,
        "words": word_data
    }

    # Save
    os.makedirs(os.path.dirname(output_path) or '.', exist_ok=True)
    with open(output_path, 'w', encoding='utf-8') as f:
        json.dump(speaker_json, f, ensure_ascii=False, indent=4)
    print(f"Created speaker JSON: {output_path}")
    print(f"  - Words: {num_words}")
    print(f"  - Total codes: {len(codes)}")
    print(f"  - Duration: {total_duration:.2f}s")
    return speaker_json
def main():
    """CLI entry point: parse arguments, verify deps, build the speaker JSON."""
    default_model = "~/.yarngpt/models/wavtokenizer_large_speech_320_24k.ckpt"
    default_config = ("~/.yarngpt/models/wavtokenizer_mediumdata_frame75_3s_nq1_"
                      "code4096_dim512_kmeans200_attn.yaml")

    parser = argparse.ArgumentParser(description="Create YarnGPT speaker JSON from audio")
    parser.add_argument("--audio", required=True, help="Path to audio file (WAV)")
    parser.add_argument("--text", required=True, help="Transcript of the audio")
    parser.add_argument("--output", required=True, help="Output JSON path")
    parser.add_argument("--model", default=default_model,
                        help="WavTokenizer model path")
    parser.add_argument("--config", default=default_config,
                        help="WavTokenizer config path")
    parser.add_argument("--language", default="english",
                        choices=["english", "yoruba", "igbo", "hausa"],
                        help="Language of the audio")
    args = parser.parse_args()

    # Abort before any heavy work if a runtime dependency is absent.
    if not check_dependencies():
        sys.exit(1)

    create_speaker_json(
        audio_path=args.audio,
        text=args.text,
        output_path=args.output,
        wav_tokenizer_model_path=os.path.expanduser(args.model),
        wav_tokenizer_config_path=os.path.expanduser(args.config),
        language=args.language
    )
# Run as a script: delegate to the CLI entry point.
if __name__ == "__main__":
    main()