File size: 568 Bytes
10d27da
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
#!/usr/bin/env bash
# Fetch a GGUF model (if not already present) and launch the web app.
#
# Environment overrides (both optional, defaults preserve prior behavior):
#   MODEL_DIR - directory where the model is stored (default: model)
#   GGUF_URL  - source URL for the GGUF file
set -euo pipefail

# Where the model will be stored.
MODEL_DIR="${MODEL_DIR:-model}"
mkdir -p -- "$MODEL_DIR"

# Download GGUF model if not present (CHANGE URL if you have a preferred GGUF).
GGUF_URL="${GGUF_URL:-https://huggingface.co/TheBloke/llama-2-7b-chat-GGUF/resolve/main/llama-2-7b-chat.gguf}"  # example; may be large
readonly GGUF_FILE="$MODEL_DIR/model.gguf"

if [[ ! -f "$GGUF_FILE" ]]; then
  echo "Downloading GGUF model (this may be large)..." >&2
  # Download to a temp name and move into place atomically so that a failed
  # or interrupted download never leaves a partial/garbage file that later
  # runs would mistake for a valid model.
  tmp="$GGUF_FILE.part"
  trap 'rm -f -- "$tmp"' EXIT
  # -f: fail on HTTP errors (4xx/5xx) instead of saving the error page.
  # -L: follow redirects (Hugging Face serves files via redirect).
  curl -fL -o "$tmp" "$GGUF_URL" || {
    echo "error: download of $GGUF_URL failed" >&2
    exit 1
  }
  mv -- "$tmp" "$GGUF_FILE"
  trap - EXIT
fi

# Run the web app (python will import llama-cpp-python, which builds its
# wheel at install time).
python app.py