File size: 5,622 Bytes
39da493
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
#!/usr/bin/env python3
"""
YarnGPT Speaker JSON Creator
Creates speaker JSON files from audio + transcript for voice cloning.

This script:
1. Loads audio and converts to WavTokenizer codes
2. Uses whisper for forced alignment to segment audio into words
3. Creates speaker JSON files compatible with YarnGPT
"""

import os
import sys
import json
import argparse
import re
from pathlib import Path

def check_dependencies(required=None):
    """Check that the packages needed for voice cloning are installed.

    Args:
        required: Optional list of module names to probe. Defaults to the
            packages this script imports lazily (torch, torchaudio,
            transformers, outetts).

    Returns:
        True when every package is importable; False otherwise, after
        printing which packages are missing and an install hint.
    """
    import importlib.util

    if required is None:
        required = ['torch', 'torchaudio', 'transformers', 'outetts']
    missing = []
    for pkg in required:
        # find_spec locates the package without executing it -- unlike
        # __import__, which would fully initialise heavy packages (importing
        # torch alone can take seconds) just to confirm they exist.
        try:
            if importlib.util.find_spec(pkg) is None:
                missing.append(pkg)
        except (ImportError, ValueError):
            # find_spec raises for broken parents / invalid names; treat as missing.
            missing.append(pkg)
    if missing:
        print(f"Missing packages: {missing}")
        print("Install with: pip install torch torchaudio transformers outetts")
        return False
    return True

def create_speaker_json(
    audio_path: str,
    text: str,
    output_path: str,
    wav_tokenizer_model_path: str,
    wav_tokenizer_config_path: str,
    language: str = "english"
):
    """
    Create a speaker JSON file from an audio sample.

    Encodes the audio with WavTokenizer, then distributes the resulting
    codes uniformly across the transcript's words (no forced alignment).

    Args:
        audio_path: Path to reference audio file (WAV)
        text: Transcript of the audio (must contain at least one word)
        output_path: Where to save the speaker JSON
        wav_tokenizer_model_path: Path to WavTokenizer model checkpoint
        wav_tokenizer_config_path: Path to WavTokenizer config
        language: Language of the audio (english, yoruba, igbo, hausa)

    Returns:
        The speaker JSON dict that was written to ``output_path``.

    Raises:
        ValueError: If ``text`` contains no words (would otherwise crash
            with ZeroDivisionError when splitting codes per word).
    """
    import torch
    import torchaudio
    import numpy as np
    from outetts.wav_tokenizer.decoder import WavTokenizer
    from outetts.wav_tokenizer.encoder.utils import convert_audio

    # Validate the transcript up front: an empty/whitespace-only text would
    # divide by zero below when computing codes_per_word.
    words = text.strip().split()
    num_words = len(words)
    if num_words == 0:
        raise ValueError("Transcript text must contain at least one word")

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"Using device: {device}")

    # Load WavTokenizer
    print("Loading WavTokenizer...")
    wavtokenizer = WavTokenizer.from_pretrained0802(wav_tokenizer_config_path, wav_tokenizer_model_path)
    wavtokenizer = wavtokenizer.to(device)

    # Load audio; torchaudio returns a (channels, samples) float tensor.
    print(f"Loading audio: {audio_path}")
    audio_data, sample_rate = torchaudio.load(audio_path)
    audio_data = audio_data.to(dtype=torch.float32)

    # Resample to 24 kHz mono. convert_audio also downmixes channels, so run
    # it whenever the input is not already mono 24 kHz -- previously a stereo
    # file that happened to be 24 kHz skipped this step, feeding 2 channels
    # to the encoder and making len(audio_data) the channel count (2).
    if sample_rate != 24000 or audio_data.shape[0] != 1:
        audio_data = convert_audio(audio_data, sample_rate, 24000, 1)
    audio_data = audio_data.squeeze()

    audio = audio_data.unsqueeze(0).to(device)
    if audio.ndim == 3:
        audio = audio.squeeze(1)

    # Get audio codes
    print("Encoding audio to WavTokenizer codes...")
    bandwidth_id = torch.tensor([0]).to(device)
    _, codes = wavtokenizer.encode_infer(audio, bandwidth_id=bandwidth_id)
    codes = codes.squeeze().tolist()

    # Calculate total duration
    total_samples = len(audio_data)
    total_duration = total_samples / 24000  # 24kHz sample rate

    # Simple uniform distribution of codes across words.
    # For better results, use forced alignment (whisper, wav2vec2-ctc, etc.)
    codes_per_word = len(codes) // num_words
    duration_per_word = total_duration / num_words

    word_data = []
    for i, word in enumerate(words):
        start_idx = i * codes_per_word
        # The last word absorbs the remainder codes so none are dropped.
        end_idx = start_idx + codes_per_word if i < num_words - 1 else len(codes)
        word_codes = codes[start_idx:end_idx]

        # Normalize word for YarnGPT: lowercase, letters only; fall back to
        # the plain lowercased word if stripping leaves nothing (e.g. "123").
        normalized_word = re.sub(r'[^a-zA-Z]', '', word.lower())
        if not normalized_word:
            normalized_word = word.lower()

        word_data.append({
            "word": normalized_word,
            "duration": f"{duration_per_word:.2f}",
            "codes": word_codes
        })

    # Create speaker JSON in the shape YarnGPT expects.
    speaker_json = {
        "text": text,
        "words": word_data
    }

    # Save (create the output directory if needed).
    os.makedirs(os.path.dirname(output_path) or '.', exist_ok=True)
    with open(output_path, 'w', encoding='utf-8') as f:
        json.dump(speaker_json, f, ensure_ascii=False, indent=4)

    print(f"Created speaker JSON: {output_path}")
    print(f"  - Words: {num_words}")
    print(f"  - Total codes: {len(codes)}")
    print(f"  - Duration: {total_duration:.2f}s")

    return speaker_json


def main():
    """Command-line entry point: parse options, verify deps, build the JSON."""
    cli = argparse.ArgumentParser(description="Create YarnGPT speaker JSON from audio")
    cli.add_argument("--audio", required=True, help="Path to audio file (WAV)")
    cli.add_argument("--text", required=True, help="Transcript of the audio")
    cli.add_argument("--output", required=True, help="Output JSON path")
    cli.add_argument(
        "--model",
        default="~/.yarngpt/models/wavtokenizer_large_speech_320_24k.ckpt",
        help="WavTokenizer model path",
    )
    cli.add_argument(
        "--config",
        default="~/.yarngpt/models/wavtokenizer_mediumdata_frame75_3s_nq1_code4096_dim512_kmeans200_attn.yaml",
        help="WavTokenizer config path",
    )
    cli.add_argument(
        "--language",
        default="english",
        choices=["english", "yoruba", "igbo", "hausa"],
        help="Language of the audio",
    )

    opts = cli.parse_args()

    # Bail out before any heavy imports happen inside create_speaker_json.
    if not check_dependencies():
        sys.exit(1)

    create_speaker_json(
        audio_path=opts.audio,
        text=opts.text,
        output_path=opts.output,
        wav_tokenizer_model_path=os.path.expanduser(opts.model),
        wav_tokenizer_config_path=os.path.expanduser(opts.config),
        language=opts.language,
    )


# Run as a script: delegate to the CLI entry point.
if __name__ == "__main__":
    main()