{
"name": "xtts_v2_mobile",
"version": "2.0.3",
"description": "XTTS v2 optimized for mobile deployment with TorchScript",
"export_type": "torchscript",
"torch_version": "2.8.0",
"sample_rate_hz": 24000,
"languages": [
"en", "es", "fr", "de", "it", "pt", "pl", "tr",
"ru", "nl", "cs", "ar", "zh", "ja", "ko", "hu", "hi"
],
"variants": {
"original": {
"file": "original/xtts_infer_original.ts",
"size_mb": 1162.64,
"quantization": "none",
"memory_estimate_mb": 1500,
"recommended_ram_gb": 4
},
"fp16": {
"file": "fp16/xtts_infer_fp16.ts",
"size_mb": 581.40,
"quantization": "fp16",
"memory_estimate_mb": 800,
"recommended_ram_gb": 3
}
},
"usage": {
"android": {
"gradle": "implementation 'org.pytorch:pytorch_android_lite:2.1.0'",
"load": "Module module = Module.load(modelPath);",
"inference": "Tensor output = module.forward(IValue.from(text), IValue.from(lang)).toTensor();"
},
"ios": {
"podfile": "pod 'LibTorch-Lite', '~> 2.1.0'",
"load": "TorchModule *module = [[TorchModule alloc] initWithFileAtPath:modelPath];",
"inference": "at::Tensor output = [module forward:@[text, language]];"
},
"react_native": {
"download": "See README for download instructions",
"native_module": "XTTSModule.speak(text, language)"
}
},
"recommendations": {
"best_quality": "original",
"best_balance": "fp16",
"low_memory_devices": "Use fp16 variant for devices with <4GB RAM",
"high_end_devices": "Use original for flagship devices with 6GB+ RAM"
}
}