# visual_memory/.env.example
# ── Environment Server Configuration ──
OPENENV_PORT=8000
MAX_CONCURRENT_ENVS=8
ENABLE_WEB_INTERFACE=true
RENDER_MODE=svg
MAX_BOARD_SIZE=12
# VISUAL_MEMORY_SCENARIOS_DIR= # Optional: override scenario directory path
# ── LLM Configuration (used by run_eval.py) ──
LLM_MODEL=gpt-4o
LLM_TEMPERATURE=0.0
LLM_MAX_TOKENS=1024
# ── API Keys ──
# Only the key for your chosen --model provider is required.
# OpenAI (for gpt-4o, gpt-5.4, o3-pro, etc.)
OPENAI_API_KEY=
OPENAI_API_BASE=https://api.openai.com/v1
# Anthropic (for claude-sonnet-4-6, claude-opus-4-6, etc.)
ANTHROPIC_API_KEY=
# Google (for gemini-2.5-pro, etc.)
GOOGLE_API_KEY=
# For local models via Ollama — no key needed, just run:
# ollama serve && ollama pull llama3
# Then use: --model ollama/llama3