Joycaption-basic / start.sh
Hug0endob's picture
Create start.sh
10d27da verified
raw
history blame
568 Bytes
#!/bin/bash
# Download a GGUF model (if not already cached) and launch the web app.
set -euo pipefail

# Where the model will be stored.
readonly MODEL_DIR="model"
mkdir -p -- "$MODEL_DIR"

# Download GGUF model if not present (CHANGE URL if you have a preferred GGUF).
readonly GGUF_URL="https://huggingface.co/TheBloke/llama-2-7b-chat-GGUF/resolve/main/llama-2-7b-chat.gguf" # example; may be large
readonly GGUF_FILE="$MODEL_DIR/model.gguf"

if [ ! -f "$GGUF_FILE" ]; then
  echo "Downloading GGUF model (this may be large)..." >&2
  # Download to a temp file and rename atomically so that a failed or
  # interrupted transfer never leaves a truncated model.gguf behind —
  # the -f guard above would otherwise skip the download forever.
  tmp="$GGUF_FILE.part"
  trap 'rm -f -- "$tmp"' EXIT
  # -f: fail on HTTP errors instead of saving the error page as the model.
  curl -fL --retry 3 -o "$tmp" "$GGUF_URL"
  mv -- "$tmp" "$GGUF_FILE"
  trap - EXIT
fi

# Run the web app (python will import llama-cpp-python which builds its wheel
# at install). exec replaces this shell so the app receives signals (e.g.
# SIGTERM from a container runtime) directly.
exec python app.py