|
|
#!/bin/bash
#
# Setup script for the Unsloth dual-GPU LoRA training environment.
# Installs PyTorch (CUDA 12.1 wheels), core training dependencies,
# Unsloth from source, and CLI requirements, then smoke-tests the
# installation. Run it from the humigence project root.

# Strict mode: abort on errors, unset variables, and pipeline failures.
set -euo pipefail

echo "🚀 Setting up Unsloth dual-GPU LoRA training environment..."
|
|
|
|
|
|
|
|
# The script relies on relative paths (cli/main.py, runs/, humigence_data/),
# so it must be launched from the humigence project root.
if [ ! -f "cli/main.py" ]; then
    echo "❌ Error: Please run this script from the humigence directory" >&2
    exit 1
fi
|
|
|
|
|
|
|
|
# Require Python 3.8+. Extract "major.minor" from `python3 --version`;
# `sort -V` orders version strings numerically, so if the required
# version sorts first, the detected version is >= the requirement.
python_version=$(python3 --version 2>&1 | cut -d' ' -f2 | cut -d'.' -f1,2)
required_version="3.8"

if [ "$(printf '%s\n' "$required_version" "$python_version" | sort -V | head -n1)" != "$required_version" ]; then
    echo "❌ Error: Python 3.8+ required, found $python_version" >&2
    exit 1
fi

echo "✅ Python version: $python_version"
|
|
|
|
|
|
|
|
# Detect NVIDIA GPUs. A missing nvidia-smi is only a warning: the
# installation can still proceed (training just falls back to CPU).
if ! command -v nvidia-smi &> /dev/null; then
    echo "⚠️  Warning: nvidia-smi not found. CUDA may not be available." >&2
else
    echo "✅ CUDA detected:"
    nvidia-smi --query-gpu=name,memory.total --format=csv,noheader,nounits
fi
|
|
|
|
|
|
|
|
echo "📦 Installing PyTorch with CUDA support..."
# CUDA 12.1 wheels come from PyTorch's dedicated index, not PyPI.
pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu121
|
|
|
|
|
|
|
|
echo "📦 Installing other dependencies..."
# Version specifiers MUST be quoted: an unquoted '>' is shell output
# redirection, so `transformers>=4.36.0` would install an unpinned
# 'transformers' and create a junk file named '=4.36.0'.
pip install "transformers>=4.36.0" "datasets>=2.14.0" "accelerate>=0.24.0" "peft>=0.7.0" "bitsandbytes>=0.41.0"
|
|
|
|
|
|
|
|
echo "📦 Installing Unsloth from source..."
# Direct-URL requirement (PEP 508): install the 'colab-new' extra
# straight from the upstream git repository.
pip install "unsloth[colab-new] @ git+https://github.com/unslothai/unsloth.git"
|
|
|
|
|
|
|
|
echo "📦 Installing CLI dependencies..."
# Quote every specifier — an unquoted '>=' is parsed as a shell
# redirection, silently dropping the version constraints.
pip install "rich>=13.0.0" "inquirer>=3.1.0" "typer>=0.9.0" "numpy>=1.24.0" "pandas>=2.0.0" "tqdm>=4.65.0"
|
|
|
|
|
|
|
|
echo "📁 Creating output directories..."
# -p: create parents as needed and succeed if the dirs already exist.
mkdir -p runs/humigence humigence_data
|
|
|
|
|
|
|
|
echo "🧪 Testing installation..."
# Smoke-test the install: import every core dependency, report GPU
# availability, and fail the script if Unsloth cannot be imported.
# The quoted heredoc delimiter ('PYEOF') prevents any shell expansion
# inside the embedded Python code.
python3 - <<'PYEOF'
import sys

import torch
import transformers
import datasets
import accelerate
import peft
import bitsandbytes

print('✅ All core dependencies imported successfully')

# Test CUDA
if torch.cuda.is_available():
    print(f'✅ CUDA available: {torch.cuda.device_count()} GPU(s)')
    for i in range(torch.cuda.device_count()):
        print(f'  GPU {i}: {torch.cuda.get_device_name(i)}')
else:
    print('⚠️  CUDA not available - training will be slower')

# Test Unsloth
try:
    import unsloth
    print('✅ Unsloth imported successfully')
except ImportError as e:
    print(f'❌ Unsloth import failed: {e}')
    sys.exit(1)
PYEOF
|
|
|
|
|
echo ""
echo "🎉 Setup completed successfully!"
echo ""
echo "To start training:"
echo "  python3 cli/main.py"
echo ""
echo "Available options:"
echo "  1. Supervised Fine-Tuning (Unsloth + Dual-GPU) 🚀"
echo "  2. Single-GPU LoRA Training ✅"
echo ""
echo "For dual-GPU training, ensure you have 2+ GPUs available."
echo "The system will automatically detect and use available GPUs."
|
|
|