|
|
@echo off
REM ---------------------------------------------------------------
REM CUDA environment setup for the Ursa Minor Smashed model.
REM Verifies NVIDIA drivers, creates a dedicated virtual environment,
REM installs a CUDA-enabled PyTorch stack, and sanity-checks the result.
REM ---------------------------------------------------------------
REM setlocal keeps variables (e.g. cuda_choice) from leaking into the
REM caller's cmd session.
setlocal

echo [SETUP] Setting up CUDA environment for Ursa Minor Smashed model...

REM nvidia-smi ships with the NVIDIA driver. If it is not on PATH, no
REM CUDA runtime can work, so fail fast before doing any installs.
nvidia-smi >nul 2>&1
if errorlevel 1 (
    echo [ERROR] nvidia-smi not found. Make sure NVIDIA drivers are installed.
    pause
    exit /b 1
)

echo [INFO] Checking GPU information...
nvidia-smi
|
|
|
|
|
|
|
|
|
echo [INFO] Please ensure you have CUDA 11.8 or 12.1 installed

echo [INFO] Creating virtual environment...
python -m venv venv-cuda
if errorlevel 1 (
    echo [ERROR] Failed to create virtual environment. Is Python on PATH?
    pause
    exit /b 1
)

REM The message must stay on ONE line: a continuation line after "echo"
REM would be executed by cmd as a command and fail.
echo [OK] Activating virtual environment...
call venv-cuda\Scripts\activate

REM Upgrade pip inside the venv before installing large CUDA wheels.
python -m pip install --upgrade pip
|
|
|
|
|
|
|
|
|
echo [INFO] Installing CUDA requirements...
echo This may take a few minutes as PyTorch CUDA packages are large...

echo.
echo Please select your CUDA version:
echo 1. CUDA 11.8
echo 2. CUDA 12.1
echo 3. Auto-detect (default)
set /p cuda_choice=Enter choice (1-3, default 3):

REM Option 3 ("auto-detect") currently just falls through to the
REM CUDA 11.8 wheels, which work on newer drivers as well.
if "%cuda_choice%"=="1" (
    echo Installing PyTorch for CUDA 11.8...
    pip install torch torchaudio torchvision --extra-index-url https://download.pytorch.org/whl/cu118
) else if "%cuda_choice%"=="2" (
    echo Installing PyTorch for CUDA 12.1...
    pip install torch torchaudio torchvision --extra-index-url https://download.pytorch.org/whl/cu121
) else (
    REM Parentheses inside a ( ) block must be escaped with ^ or the
    REM first ")" would terminate the block early.
    echo Installing PyTorch for CUDA 11.8 ^(default^)...
    pip install torch torchaudio torchvision --extra-index-url https://download.pytorch.org/whl/cu118
)
|
|
|
|
|
|
|
|
|
echo [INFO] Installing remaining dependencies...
REM Version specifiers MUST be quoted: an unquoted ">" is output
REM redirection in cmd, so "pip install numpy>=1.24.0" would run
REM "pip install numpy" with stdout redirected to a file named "=1.24.0".
pip install "numpy>=1.24.0" "tiktoken>=0.5.0" "tqdm>=4.65.0"
pip install "gguf>=0.6.0" "sentencepiece>=0.1.99" "safetensors>=0.4.0" "psutil>=5.8.0"
pip install "pynvml>=11.4.1" "nvidia-ml-py3>=7.352.0"
pip install "matplotlib>=3.7.0" "jupyter>=1.0.0"
|
|
|
|
|
|
|
|
|
echo [TEST] Testing CUDA setup...
REM Exit status mirrors torch.cuda.is_available() so the batch script
REM can branch on it via errorlevel.
python -c "import torch; print(f'PyTorch version: {torch.__version__}'); print(f'CUDA available: {torch.cuda.is_available()}'); import sys; sys.exit(0 if torch.cuda.is_available() else 1)"

if errorlevel 1 (
    echo [ERROR] CUDA not available in PyTorch installation
    pause
    exit /b 1
)

REM Print device details only after the availability check succeeded,
REM since these calls raise if no CUDA device is present.
python -c "import torch; print(f'CUDA device count: {torch.cuda.device_count()}'); print(f'Current CUDA device: {torch.cuda.current_device()}'); print(f'CUDA device name: {torch.cuda.get_device_name()}'); print(f'CUDA version: {torch.version.cuda}')"
|
|
|
|
|
|
|
|
|
REM The model file is expected alongside this script; warn (but do not
REM abort) if it is missing so the environment setup still completes.
if exist "model_optimized.pt" (
    REM Message must stay on ONE line: a continuation line inside this
    REM block would be executed by cmd as a command and fail.
    echo [OK] Model file found: model_optimized.pt
) else (
    echo [WARN] model_optimized.pt not found in current directory
    echo Make sure you have the model file in the same directory as this script
)

echo.
echo [DONE] CUDA setup complete!
echo.
echo Usage Instructions:
echo To activate CUDA environment:
echo    venv-cuda\Scripts\activate
echo.
echo To run CUDA inference:
echo    python inference_cuda.py --prompt "Your prompt here"
echo.
echo To run CUDA chat:
echo    python chat_cuda.py
echo.
echo To run CUDA benchmark:
echo    python benchmark_cuda.py
echo.
echo Test your setup:
echo    python -c "import torch; print('CUDA available:', torch.cuda.is_available())"

pause