|
|
#!/bin/bash
#
# Sets up a CUDA-enabled Python environment for the Ursa Minor Smashed model:
# verifies NVIDIA drivers, detects the CUDA version, creates a virtualenv,
# installs a matching PyTorch build, and sanity-checks the installation.

set -euo pipefail

echo "Setting up CUDA environment for Ursa Minor Smashed model..."

# NVIDIA drivers must be present; nvidia-smi is the canonical probe.
if ! command -v nvidia-smi >/dev/null 2>&1; then
  echo "ERROR: nvidia-smi not found. Make sure NVIDIA drivers are installed." >&2
  exit 1
fi

echo "Checking GPU information..."
nvidia-smi

# Extract "CUDA Version: NN.N" from the nvidia-smi banner. The previous
# awk '{print $9}' depended on an exact column position, which shifts across
# driver versions; anchoring on the label is layout-independent.
CUDA_VERSION=$(nvidia-smi | sed -n 's/.*CUDA Version: *\([0-9][0-9]*\.[0-9][0-9]*\).*/\1/p' | head -n1)
if [[ -z "$CUDA_VERSION" ]]; then
  echo "WARNING: could not detect CUDA version from nvidia-smi output" >&2
fi
echo "Detected CUDA Version: $CUDA_VERSION"
|
|
|
|
|
|
|
|
echo "Creating virtual environment..."
# Abort early if venv creation fails; otherwise the 'source' below would
# produce a confusing "No such file" error instead.
python -m venv venv-cuda || { echo "ERROR: failed to create virtual environment" >&2; exit 1; }

# Git Bash / MSYS on Windows places the activation script under Scripts/,
# POSIX layouts under bin/.
if [[ "$OSTYPE" == "msys" || "$OSTYPE" == "win32" ]]; then
  source venv-cuda/Scripts/activate
else
  source venv-cuda/bin/activate
fi

echo "Virtual environment activated"

pip install --upgrade pip
|
|
|
|
|
|
|
|
echo "Installing CUDA requirements..."
echo "This may take a few minutes as PyTorch CUDA packages are large..."

# Pick the PyTorch wheel index matching the detected CUDA major version.
# Unknown versions fall back to the CUDA 11.8 build. Using a single
# install command avoids the duplicated pip lines of the old if/elif/else.
case "$CUDA_VERSION" in
  12.*)
    echo "Installing PyTorch for CUDA 12.1..."
    torch_index="https://download.pytorch.org/whl/cu121"
    ;;
  11.*)
    echo "Installing PyTorch for CUDA 11.8..."
    torch_index="https://download.pytorch.org/whl/cu118"
    ;;
  *)
    echo "WARNING: Unsupported CUDA version $CUDA_VERSION" >&2
    echo "Installing default CUDA 11.8 PyTorch..."
    torch_index="https://download.pytorch.org/whl/cu118"
    ;;
esac
pip install torch torchaudio torchvision --extra-index-url "$torch_index"

echo "Installing remaining dependencies..."
pip install -r requirements-cuda.txt
|
|
|
|
|
|
|
|
echo "Testing CUDA setup..."
# The embedded Python had lost its indentation (an IndentationError as
# written); restored here. A failed check now aborts the script instead of
# being silently ignored.
python -c "
import torch
print(f'PyTorch version: {torch.__version__}')
print(f'CUDA available: {torch.cuda.is_available()}')
if torch.cuda.is_available():
    print(f'CUDA device count: {torch.cuda.device_count()}')
    print(f'Current CUDA device: {torch.cuda.current_device()}')
    print(f'CUDA device name: {torch.cuda.get_device_name()}')
    print(f'CUDA version: {torch.version.cuda}')
else:
    print('ERROR: CUDA not available in PyTorch installation')
    raise SystemExit(1)
" || exit 1
|
|
|
|
|
|
|
|
# Report whether the optimized model weights are present in the current
# directory. Missing weights are a warning, not a fatal error, because the
# user may copy them in after running this setup script.
check_model_file() {
  if [[ -f "model_optimized.pt" ]]; then
    echo "Model file found: model_optimized.pt"
  else
    echo "WARNING: model_optimized.pt not found in current directory"
    echo "Make sure you have the model file in the same directory as this script"
  fi
}
check_model_file
|
|
|
|
|
# Final summary: how to re-activate the environment and run the CUDA tools.
echo ""
echo "CUDA setup complete!"
echo ""
echo "Usage Instructions:"
echo "To activate CUDA environment:"
if [[ "$OSTYPE" == "msys" || "$OSTYPE" == "win32" ]]; then
  echo " source venv-cuda/Scripts/activate"
else
  echo " source venv-cuda/bin/activate"
fi
echo ""
echo "To run CUDA inference:"
echo " python inference_cuda.py --prompt 'Your prompt here'"
echo ""
echo "To run CUDA chat:"
echo " python chat_cuda.py"
echo ""
echo "To run CUDA benchmark:"
echo " python benchmark_cuda.py"
echo ""
echo "Test your setup:"
echo " python -c \"import torch; print('CUDA available:', torch.cuda.is_available())\""