Spaces:
Sleeping
Sleeping
# ── Environment Server Configuration ──
OPENENV_PORT=8000
MAX_CONCURRENT_ENVS=8
ENABLE_WEB_INTERFACE=true
RENDER_MODE=svg
MAX_BOARD_SIZE=12
# VISUAL_MEMORY_SCENARIOS_DIR=  # Optional: override scenario directory path
# ── LLM Configuration (used by run_eval.py) ──
LLM_MODEL=gpt-4o
LLM_TEMPERATURE=0.0
LLM_MAX_TOKENS=1024
# ── API Keys ──
# Only the key for your chosen --model provider is required.

# OpenAI (for gpt-4o, gpt-5.4, o3-pro, etc.)
OPENAI_API_KEY=
OPENAI_API_BASE=https://api.openai.com/v1

# Anthropic (for claude-sonnet-4-6, claude-opus-4-6, etc.)
ANTHROPIC_API_KEY=

# Google (for gemini-2.5-pro, etc.)
GOOGLE_API_KEY=

# For local models via Ollama — no key needed, just run:
#   ollama serve && ollama pull llama3
# Then use: --model ollama/llama3