#!/bin/bash
#
# CUDA setup script for the Ursa Minor Smashed model.
# Creates a dedicated virtual environment, installs a CUDA-enabled PyTorch
# build matched to the detected driver, then installs the remaining
# project dependencies from requirements-cuda.txt.
#
# Requires: NVIDIA drivers (nvidia-smi on PATH), python with the venv module.

# Fail fast: abort on any command error, unset variable, or failed pipeline
# stage so a broken pip install cannot silently yield a half-configured env.
set -euo pipefail

echo "🔥 Setting up CUDA environment for Ursa Minor Smashed model..."

# Check if NVIDIA GPU drivers are available before doing anything else.
if ! command -v nvidia-smi >/dev/null 2>&1; then
  echo "❌ ERROR: nvidia-smi not found. Make sure NVIDIA drivers are installed." >&2
  exit 1
fi

echo "🔍 Checking GPU information..."
nvidia-smi

# Detect the driver's CUDA version (major.minor). Matching the
# "CUDA Version: X.Y" token directly is more robust than counting awk
# fields, whose position shifts between nvidia-smi releases.
CUDA_VERSION=$(nvidia-smi | grep -o 'CUDA Version: [0-9][0-9.]*' | grep -o '[0-9][0-9.]*' | cut -d. -f1,2)
echo "📌 Detected CUDA Version: $CUDA_VERSION"

# Create virtual environment
echo "📦 Creating virtual environment..."
python -m venv venv-cuda

# Activate virtual environment (Windows venv layouts use Scripts/, not bin/).
if [[ "$OSTYPE" == "msys" || "$OSTYPE" == "win32" ]]; then
  source venv-cuda/Scripts/activate
else
  source venv-cuda/bin/activate
fi

echo "✅ Virtual environment activated"

# Upgrade pip inside the fresh environment.
pip install --upgrade pip

# Install CUDA requirements
echo "🚀 Installing CUDA requirements..."
echo "This may take a few minutes as PyTorch CUDA packages are large..."

# Pick the PyTorch wheel index matching the detected CUDA major version;
# unknown versions fall back to the cu118 wheels.
if [[ "$CUDA_VERSION" == "12."* ]]; then
  echo "Installing PyTorch for CUDA 12.1..."
  pip install torch torchaudio torchvision --extra-index-url https://download.pytorch.org/whl/cu121
elif [[ "$CUDA_VERSION" == "11."* ]]; then
  echo "Installing PyTorch for CUDA 11.8..."
  pip install torch torchaudio torchvision --extra-index-url https://download.pytorch.org/whl/cu118
else
  echo "⚠️ Warning: Unsupported CUDA version $CUDA_VERSION"
  echo "Installing default CUDA 11.8 PyTorch..."
  pip install torch torchaudio torchvision --extra-index-url https://download.pytorch.org/whl/cu118
fi

# Install the remaining (non-PyTorch) dependencies.
echo "📋 Installing remaining dependencies..."
pip install -r requirements-cuda.txt

# Test CUDA availability
echo "🧪 Testing CUDA setup..."
# Verify the freshly installed PyTorch can actually see the GPU.
# The exit status is checked explicitly: without it, a broken install
# would print a Python-side failure message yet the script would still
# report "CUDA setup complete!" below.
python - <<'PY' || { echo "❌ CUDA verification failed; aborting." >&2; exit 1; }
import sys
import torch

print(f'PyTorch version: {torch.__version__}')
print(f'CUDA available: {torch.cuda.is_available()}')
if torch.cuda.is_available():
    print(f'CUDA device count: {torch.cuda.device_count()}')
    print(f'Current CUDA device: {torch.cuda.current_device()}')
    print(f'CUDA device name: {torch.cuda.get_device_name()}')
    print(f'CUDA version: {torch.version.cuda}')
else:
    print('❌ CUDA not available in PyTorch installation')
    # sys.exit is explicit; the bare exit() builtin is only a site-module
    # convenience and is not guaranteed in all embedded contexts.
    sys.exit(1)
PY

# Warn (but do not fail) if the model weights are not alongside the script.
if [[ -f "model_optimized.pt" ]]; then
  echo "✅ Model file found: model_optimized.pt"
else
  echo "⚠️ Warning: model_optimized.pt not found in current directory"
  echo "Make sure you have the model file in the same directory as this script"
fi

echo ""
echo "🎉 CUDA setup complete!"
echo ""
echo "📖 Usage Instructions:"
echo "To activate CUDA environment:"
# Mirror the activation-path logic used during setup (Windows uses Scripts/).
if [[ "$OSTYPE" == "msys" || "$OSTYPE" == "win32" ]]; then
  echo "  source venv-cuda/Scripts/activate"
else
  echo "  source venv-cuda/bin/activate"
fi
echo ""
echo "To run CUDA inference:"
echo "  python inference_cuda.py --prompt 'Your prompt here'"
echo ""
echo "To run CUDA chat:"
echo "  python chat_cuda.py"
echo ""
echo "To run CUDA benchmark:"
echo "  python benchmark_cuda.py"
echo ""
echo "📊 Test your setup:"
echo "  python -c \"import torch; print('CUDA available:', torch.cuda.is_available())\""