# Ursa_Minor_Smashed / setup-cuda.sh
# Kaileh57: Upload folder using huggingface_hub (commit 8691f4b, verified)
#!/bin/bash
# CUDA Setup script for Ursa Minor Smashed model
#
# Verifies the NVIDIA driver is present, detects the installed CUDA version,
# then (below) builds a dedicated virtualenv with a matching PyTorch build.
set -u  # treat unset variables as errors; pip/install steps are checked explicitly

echo "🔥 Setting up CUDA environment for Ursa Minor Smashed model..."

# Check if NVIDIA GPU driver tooling is available.
if ! command -v nvidia-smi &> /dev/null; then
  echo "❌ ERROR: nvidia-smi not found. Make sure NVIDIA drivers are installed." >&2
  exit 1
fi

echo "🔍 Checking GPU information..."
nvidia-smi

# Detect CUDA version. Anchor on the "CUDA Version:" label instead of the old
# positional `awk '{print $9}'`, which breaks when nvidia-smi's header layout
# shifts. Result is major.minor (e.g. "12.4"); empty if the label is missing.
CUDA_VERSION=$(nvidia-smi | grep -o 'CUDA Version: [0-9][0-9.]*' | awk '{print $3}' | cut -d. -f1,2 || true)
if [[ -z "$CUDA_VERSION" ]]; then
  echo "⚠️ Warning: could not parse CUDA version from nvidia-smi output" >&2
fi
echo "📌 Detected CUDA Version: $CUDA_VERSION"
# Create virtual environment
echo "📦 Creating virtual environment..."

# Prefer `python` (as the original script did) but fall back to `python3`,
# which is the only interpreter name on many Linux distributions.
if command -v python &> /dev/null; then
  PYTHON_BIN=python
elif command -v python3 &> /dev/null; then
  PYTHON_BIN=python3
else
  echo "❌ ERROR: no python interpreter found on PATH." >&2
  exit 1
fi

# Abort if venv creation fails; otherwise `source …/activate` below would
# silently leave us installing into the system Python.
if ! "$PYTHON_BIN" -m venv venv-cuda; then
  echo "❌ ERROR: failed to create virtual environment venv-cuda" >&2
  exit 1
fi

# Activate virtual environment (Scripts/ layout on Windows shells, bin/ elsewhere)
if [[ "$OSTYPE" == "msys" || "$OSTYPE" == "win32" ]]; then
  source venv-cuda/Scripts/activate
else
  source venv-cuda/bin/activate
fi
echo "✅ Virtual environment activated"

# Upgrade pip
pip install --upgrade pip
# Install CUDA requirements
echo "🚀 Installing CUDA requirements..."
echo "This may take a few minutes as PyTorch CUDA packages are large..."

# Detect CUDA version and install the matching PyTorch wheel index.
# PyTorch's official install instructions use --index-url for the cuXXX
# wheel indexes; the CUDA 11.8 index is the documented fallback.
case "${CUDA_VERSION:-}" in
  12.*)
    echo "Installing PyTorch for CUDA 12.1..."
    pip install torch torchaudio torchvision --index-url https://download.pytorch.org/whl/cu121
    ;;
  11.*)
    echo "Installing PyTorch for CUDA 11.8..."
    pip install torch torchaudio torchvision --index-url https://download.pytorch.org/whl/cu118
    ;;
  *)
    echo "⚠️ Warning: Unsupported CUDA version ${CUDA_VERSION:-<none detected>}" >&2
    echo "Installing default CUDA 11.8 PyTorch..."
    pip install torch torchaudio torchvision --index-url https://download.pytorch.org/whl/cu118
    ;;
esac

# Install remaining requirements. Skip with a warning (instead of letting pip
# error out) when the requirements file isn't shipped alongside the script.
echo "📋 Installing remaining dependencies..."
if [[ -f "requirements-cuda.txt" ]]; then
  pip install -r requirements-cuda.txt
else
  echo "⚠️ Warning: requirements-cuda.txt not found; skipping extra dependencies" >&2
fi
# Test CUDA availability. The heredoc (quoted delimiter, so no shell expansion)
# avoids the quoting hazards of `python -c "..."`, and the exit status is now
# checked — the original discarded it, reporting success even when the
# embedded test called exit(1).
echo "🧪 Testing CUDA setup..."
if ! python - <<'PYEOF'
import sys
import torch

print(f'PyTorch version: {torch.__version__}')
print(f'CUDA available: {torch.cuda.is_available()}')
if torch.cuda.is_available():
    print(f'CUDA device count: {torch.cuda.device_count()}')
    print(f'Current CUDA device: {torch.cuda.current_device()}')
    print(f'CUDA device name: {torch.cuda.get_device_name()}')
    print(f'CUDA version: {torch.version.cuda}')
else:
    print('❌ CUDA not available in PyTorch installation')
    sys.exit(1)
PYEOF
then
  echo "❌ ERROR: CUDA verification failed; PyTorch cannot use the GPU." >&2
  exit 1
fi
# Verify model file exists
if [[ -f "model_optimized.pt" ]]; then
echo "βœ… Model file found: model_optimized.pt"
else
echo "⚠️ Warning: model_optimized.pt not found in current directory"
echo "Make sure you have the model file in the same directory as this script"
fi
# Print the usage cheat-sheet for the freshly created environment as a single
# heredoc (output is identical to the original run of echo statements).
if [[ "$OSTYPE" == "msys" || "$OSTYPE" == "win32" ]]; then
  ACTIVATE_CMD=" source venv-cuda/Scripts/activate"
else
  ACTIVATE_CMD=" source venv-cuda/bin/activate"
fi

cat <<EOF

🎉 CUDA setup complete!

📖 Usage Instructions:
To activate CUDA environment:
${ACTIVATE_CMD}

To run CUDA inference:
 python inference_cuda.py --prompt 'Your prompt here'

To run CUDA chat:
 python chat_cuda.py

To run CUDA benchmark:
 python benchmark_cuda.py

📊 Test your setup:
 python -c "import torch; print('CUDA available:', torch.cuda.is_available())"
EOF