#!/bin/bash
# Chatterbox-Finnish / install_dependencies.sh (Finnish-NLP)
# Commit d15775e — Improve setup for multi-GPU support and fix inference docs
#
# Chatterbox Finnish TTS - Dependency Installation Script
# Automatically selects the correct PyTorch/CUDA version for your GPU.
#
# NOTE: the shebang must be the first line of the file; the provenance
# lines above were previously plain text before it, which broke direct
# execution (./install_dependencies.sh).
set -e # Exit on error
echo "===================================="
echo "Chatterbox Finnish TTS Setup"
echo "===================================="
# Report the interpreter's major.minor version. Use sed instead of
# 'grep -oP' (-P is a GNU extension, unavailable on macOS/BSD grep),
# and guard against 'python' missing from PATH so 'set -e' does not
# kill the script with no explanation.
if command -v python >/dev/null 2>&1; then
  PYTHON_VERSION=$(python --version 2>&1 | sed -n 's/.*Python \([0-9][0-9]*\.[0-9][0-9]*\).*/\1/p')
  echo "Python version: $PYTHON_VERSION"
else
  echo "WARNING: 'python' not found in PATH — install Python before continuing." >&2
fi
# Detect GPU compute capability and select the matching PyTorch build.
# Blackwell (sm_120+, compute capability >= 12.0) needs the PyTorch
# 2.10.0 / CUDA 12.8 wheels; older GPUs use the 2.5.1 / CUDA 12.4 stack.
echo ""
echo "Detecting GPU architecture..."
# The embedded Python MUST be properly indented: 'python -c' receives the
# string verbatim, so unindented try/except bodies raise IndentationError
# and the detection would silently fall back to "0" for every GPU.
IS_BLACKWELL=$(python -c "
import subprocess
try:
    r = subprocess.run(
        ['nvidia-smi', '--query-gpu=compute_cap', '--format=csv,noheader'],
        capture_output=True, text=True
    )
    caps = [float(c.strip()) for c in r.stdout.strip().splitlines() if c.strip()]
    print('1' if caps and max(caps) >= 12.0 else '0')
except Exception:
    print('0')
" 2>/dev/null || echo "0")
if [ "$IS_BLACKWELL" = "1" ]; then
  echo "Blackwell GPU detected (sm_120+) — using PyTorch 2.10.0 + CUDA 12.8"
  TORCH_VERSION="2.10.0"
  TORCHVISION_VERSION="0.25.0"
  TORCHAUDIO_VERSION="2.10.0"
  CUDA_TAG="cu128"
  XFORMERS_VERSION="0.0.35"
else
  echo "Pre-Blackwell GPU detected — using PyTorch 2.5.1 + CUDA 12.4"
  TORCH_VERSION="2.5.1"
  TORCHVISION_VERSION="0.20.1"
  TORCHAUDIO_VERSION="2.5.1"
  CUDA_TAG="cu124"
  XFORMERS_VERSION="0.0.28.post3"
fi
# Steps 1-5: package installation.
# Use 'python -m pip' throughout so packages land in the same interpreter
# that performed the GPU detection above — a bare 'pip' on PATH may belong
# to a different Python installation.
echo ""
echo "Step 1: Removing conflicting packages..."
# Best-effort cleanup: packages may not be installed, so failures are
# intentionally ignored.
python -m pip uninstall -y torch torchvision torchaudio xformers flash-attn 2>/dev/null || true
# Step 2: PyTorch pinned to the version chosen by the GPU check, pulled
# from the matching CUDA wheel index.
echo ""
echo "Step 2: Installing PyTorch ${TORCH_VERSION} with CUDA ${CUDA_TAG}..."
python -m pip install \
  "torch==${TORCH_VERSION}" \
  "torchvision==${TORCHVISION_VERSION}" \
  "torchaudio==${TORCHAUDIO_VERSION}" \
  --index-url "https://download.pytorch.org/whl/${CUDA_TAG}"
# Step 3: xformers from the same CUDA index so its torch pin matches.
echo ""
echo "Step 3: Installing xformers ${XFORMERS_VERSION}..."
python -m pip install "xformers==${XFORMERS_VERSION}" --index-url "https://download.pytorch.org/whl/${CUDA_TAG}"
# Step 4: torchao (compatible with both PyTorch branches selected above).
echo ""
echo "Step 4: Installing torchao..."
python -m pip install torchao==0.6.1
# Step 5: everything else the project declares.
echo ""
echo "Step 5: Installing remaining dependencies..."
python -m pip install -r requirements.txt
# Step 6: make PyTorch's bundled cuDNN the first entry on the loader
# search path. A system-wide cuDNN of a different version can otherwise
# shadow it and crash at import time.
echo ""
echo "Step 6: Configuring cuDNN library path..."
# The embedded Python MUST be indented: unindented try/except bodies
# raise IndentationError, which previously made CUDNN_PATH always empty
# and this step always skip.
CUDNN_PATH=$(python -c "
import os
try:
    import nvidia.cudnn
    print(os.path.join(os.path.dirname(nvidia.cudnn.__file__), 'lib'))
except Exception:
    print('')
" 2>/dev/null)
if [ -n "$CUDNN_PATH" ] && [ -d "$CUDNN_PATH" ]; then
  PROFILE_LINE="export LD_LIBRARY_PATH=${CUDNN_PATH}:\$LD_LIBRARY_PATH"
  # Persist for future shells, but only once (keeps re-runs idempotent).
  if ! grep -qF "$CUDNN_PATH" ~/.bashrc 2>/dev/null; then
    echo "$PROFILE_LINE" >> ~/.bashrc
  fi
  # Apply for the current session. Branch on LD_LIBRARY_PATH being set:
  # blindly appending ':${LD_LIBRARY_PATH}' when it is unset leaves a
  # trailing ':', and an empty path element means "current directory",
  # which is a library-hijack risk.
  if [ -n "${LD_LIBRARY_PATH:-}" ]; then
    export LD_LIBRARY_PATH="${CUDNN_PATH}:${LD_LIBRARY_PATH}"
  else
    export LD_LIBRARY_PATH="${CUDNN_PATH}"
  fi
  echo "cuDNN path set to: $CUDNN_PATH"
else
  echo "No bundled cuDNN found — skipping."
fi
# Final sanity check: import the freshly installed stack and report
# versions plus CUDA/GPU details. A failed import exits non-zero and,
# under the script's 'set -e', aborts the install with a traceback.
echo ""
echo "===================================="
echo "Verifying installation..."
echo "===================================="
# The 'if' body MUST be indented: 'python -c' gets the string verbatim,
# so the previously unindented block raised IndentationError and made
# verification fail even on a healthy install.
python -c "
import torch
import xformers
import transformers
print(f' PyTorch: {torch.__version__}')
print(f' xformers: {xformers.__version__}')
print(f' Transformers: {transformers.__version__}')
print(f' CUDA available: {torch.cuda.is_available()}')
if torch.cuda.is_available():
    print(f' CUDA version: {torch.version.cuda}')
    props = torch.cuda.get_device_properties(0)
    print(f' GPU: {props.name} (sm_{props.major}{props.minor})')
"
# Closing banner with follow-up instructions for the user.
# Quoted heredoc delimiter keeps the text literal (no expansion).
cat <<'EOF'

====================================
Installation complete!
====================================

Next steps:
1. Run: python setup.py (download pretrained base models)
2. Run: python inference_example.py (run Finnish TTS inference)
3. Run: python train.py (optional: start fine-tuning)

EOF