#!/usr/bin/env bash
# Setup script for the NanoGPT inference environment.
# Creates a Python virtual environment, then installs the base and
# optional dependencies needed by the inference/chat scripts.
set -euo pipefail

echo "Setting up NanoGPT inference environment..."

# Create and activate a fresh virtual environment. Use python3
# explicitly: on many systems a bare `python` is missing or points
# at Python 2.
python3 -m venv venv
# shellcheck disable=SC1091 -- file is created by the venv step above
source venv/bin/activate

# Install base requirements (hard dependencies for inference).
pip install torch numpy tiktoken tqdm

# Install optional dependencies.
echo "Installing optional dependencies..."
pip install transformers        # For HuggingFace integration
pip install gguf                # For GGUF conversion
pip install matplotlib jupyter  # For visualization
# Verify the expected model checkpoint exists in the current directory.
# This is advisory only: a missing file produces a warning, not an abort,
# since the user may fetch the model afterwards.
if [[ -f "model_optimized.pt" ]]; then
  echo "✓ Model file found: model_optimized.pt"
else
  # Diagnostics go to stderr so they don't pollute captured stdout.
  echo "⚠ Warning: model_optimized.pt not found in current directory" >&2
  echo "Make sure you have the model file in the same directory as this script" >&2
fi
# Optionally clone and build llama.cpp for GGUF support.
read -p "Setup llama.cpp for GGUF support? (y/n) " -n 1 -r
echo
if [[ $REPLY =~ ^[Yy]$ ]]; then
  # Skip the clone on re-runs: `git clone` fails outright when the
  # target directory already exists.
  if [[ ! -d llama.cpp ]]; then
    git clone https://github.com/ggerganov/llama.cpp
  fi
  # Build in a subshell so the working directory is restored even if
  # the build aborts, and report success only when make actually
  # succeeds (the original echoed "built successfully" unconditionally).
  if (cd llama.cpp && make); then
    echo "llama.cpp built successfully"
  else
    echo "llama.cpp build failed" >&2
    exit 1
  fi
fi
# Print a summary of next steps for the user.
printf '%s\n' \
  "Setup complete!" \
  "" \
  "To activate environment: source venv/bin/activate" \
  "To run inference: python inference.py --prompt 'Your prompt here'" \
  "To run chat: python chat.py" \
  "To run examples: cd examples && python basic_usage.py"