#!/bin/bash
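# Fail fast: exit on any error (-e), on unset variables (-u), and on failures
# anywhere in a pipeline (-o pipefail).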
set -euo pipefail

log() {
  echo "[ENTRYPOINT] $1"
}

log "Boot sequence started at $(date -u +"%Y-%m-%d %H:%M:%S UTC")"

# Log the installed transformers version and verify that
# transformers.modeling_layers is importable
python - <<'PY'
import importlib.util, transformers
print(f"[ENTRYPOINT] transformers version: {transformers.__version__}")
spec = importlib.util.find_spec("transformers.modeling_layers")
print(f"[ENTRYPOINT] transformers.modeling_layers available: {bool(spec)}")
PY

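# Report whether DATABASE_URL is set without leaking its value: only the
# string length is logged, so credentials never reach container logs.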
if [[ -z "${DATABASE_URL:-}" ]]; then
  log "DATABASE_URL is empty -> Django will fallback to POSTGRES_* or SQLite"
else
  log "DATABASE_URL detected (length: ${#DATABASE_URL})"
fi

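# All subsequent commands run from the application root.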
cd /app

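# Apply pending database migrations; --noinput suppresses interactive prompts,
# which would otherwise hang a non-interactive container boot.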
log "Running migrations..."
python hue_portal/manage.py migrate --noinput
log "Migrations completed."

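# createcachetable creates the table(s) for Django's database cache backend
# and skips any that already exist, so it is safe to run on every boot.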
log "Ensuring cache table exists..."
python hue_portal/manage.py createcachetable
log "Cache table ready."

log "Starting Gunicorn on port ${PORT:-7860}..."
# Preload model if LLM provider is llama_cpp (to avoid timeout on first request)
if [[ "${DEFAULT_LLM_PROVIDER:-}" == "llama_cpp" ]] || [[ "${LLM_PROVIDER:-}" == "llama_cpp" ]]; then
  log "Preloading llama.cpp model to avoid first-request timeout..."
  python -c "
import os
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'hue_portal.hue_portal.settings')
import django
django.setup()
from hue_portal.chatbot.llm_integration import LLMGenerator
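# Build the generator eagerly so the model weights load now, during boot,
# rather than on the first chat request.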
try:
    gen = LLMGenerator()
    if gen.llama_cpp:
        print('[ENTRYPOINT] ✅ Model preloaded successfully')
    else:
        print('[ENTRYPOINT] ⚠️ Model not loaded (may load on first request)')
except Exception as e:
    print(f'[ENTRYPOINT] ⚠️ Model preload failed: {e} (will load on first request)')
" || log "Model preload skipped (will load on first request)"
fi

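# exec replaces this shell with Gunicorn so it runs as PID 1 and receives
# SIGTERM directly for clean shutdowns. A single sync worker keeps one copy
# of the model in memory; the long timeout allows slow LLM responses.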
log "Starting Gunicorn on port ${PORT:-7860}..."
exec gunicorn hue_portal.hue_portal.wsgi:application \
    --bind "0.0.0.0:${PORT:-7860}" \
    --timeout 600 \
    --workers 1 \
    --worker-class sync