# SentinelAI example environment file (.env.example)
# Core services
DATABASE_URL=postgresql+asyncpg://sentinel:sentinel@localhost:5432/sentinelai
REDIS_URL=redis://localhost:6379/0
# 1 skips the PostgreSQL connection (run without a database); 0 connects using DATABASE_URL.
SKIP_DB=1
# AsyncPG connect timeout (seconds) when PostgreSQL is unreachable — avoids long hangs
DB_CONNECT_TIMEOUT_SEC=10
CORS_ORIGINS=http://localhost:3000
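# Additional origins, assuming the backend accepts a comma-separated list (placeholder domain, illustrative only):
# CORS_ORIGINS=http://localhost:3000,https://dashboard.example.com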
# Collectors
COLLECTOR_WATCH_DIR=./demo_logs
COLLECTOR_POLL_SEC=1.0
COLLECTOR_MISSING_RETRY_SEC=10
COLLECTOR_HOSTNAME=edge-01
COLLECTOR_FILE_PATHS=
COLLECT_AUTH_LOG=0
AUTH_LOG_PATH=/var/log/auth.log
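# /var/log/auth.log is the Debian/Ubuntu location; on RHEL/CentOS the equivalent is typically:
# AUTH_LOG_PATH=/var/log/secure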
ENABLE_MOCK_CLOUD_POLL=1
# LangGraph compile dry-run runs in the background after startup (optional skip / timeout)
# SKIP_LANGGRAPH_WARMUP=1
# LANGGRAPH_WARMUP_TIMEOUT_SEC=120
AUTO_AI_ON_INCIDENT=1
AUTO_AI_MIN_SEC=75
# Local LLM (Ollama)
OLLAMA_HOST=http://localhost:11434
OLLAMA_MODEL=llama3
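# Any tag already pulled into Ollama should work here, e.g. after running "ollama pull llama3:8b":
# OLLAMA_MODEL=llama3:8b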
# OpenAI-compatible inference (vLLM, Fireworks, OpenAI, etc.)
# Must be the INFERENCE base URL — not http://localhost:8000 (that is this app’s API).
# Fireworks example: https://api.fireworks.ai/inference/v1
VLLM_BASE_URL=
VLLM_API_KEY=
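# Local vLLM example (illustrative): vLLM's OpenAI-compatible server exposes /v1; run it on
# a port other than 8000 so it does not clash with this app's own API, e.g.:
# VLLM_BASE_URL=http://localhost:8001/v1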
# Fireworks example: accounts/fireworks/models/deepseek-v4-pro
SENTINEL_LLM_MODEL=llama3
OPENAI_BASE_URL=
OPENAI_API_KEY=
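# Direct OpenAI example (illustrative; the key shown is a placeholder):
# OPENAI_BASE_URL=https://api.openai.com/v1
# OPENAI_API_KEY=sk-...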
LLM_TEMPERATURE=0.2
LLM_MAX_TOKENS=4096
# Optional Fireworks-style sampling:
# LLM_TOP_P=1
# LLM_TOP_K=40
# Optional threat intel
ABUSEIPDB_API_KEY=
VIRUSTOTAL_API_KEY=
OTX_API_KEY=
# Alerting
SLACK_WEBHOOK_URL=
DISCORD_WEBHOOK_URL=
TEAMS_WEBHOOK_URL=
GENERIC_ALERT_WEBHOOK=
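# Typical webhook URL shapes, with placeholder IDs/tokens (illustrative only):
# SLACK_WEBHOOK_URL=https://hooks.slack.com/services/T00000000/B00000000/XXXXXXXXXXXXXXXXXXXX
# DISCORD_WEBHOOK_URL=https://discord.com/api/webhooks/<webhook_id>/<webhook_token>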
# Frontend (Next.js); set this in the frontend's environment as well when running npm run dev
NEXT_PUBLIC_API_URL=http://localhost:8000
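# Point the dashboard at a remote backend instead (hypothetical host):
# NEXT_PUBLIC_API_URL=https://sentinelai-api.example.com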
# Vector store (Chroma)
CHROMA_PERSIST_DIR=./chroma_data