| # ============================================================================= | |
| # ChessEcon — Environment Variables | |
| # Copy to .env and fill in your values: | |
| # cp .env.example .env | |
| # ============================================================================= | |
| # ── Anthropic (Claude Coach Agent) ─────────────────────────────────────────── | |
| # Used ONLY for complex/critical chess positions. Minimal API calls. | |
| # Get your key: https://console.anthropic.com/ | |
| ANTHROPIC_API_KEY= | |
| CLAUDE_MODEL=claude-opus-4-5 | |
| CLAUDE_MAX_TOKENS=1024 | |
| # ── HuggingFace (Trainable LLM Download) ───────────────────────────────────── | |
| # Required for gated models (Llama). Optional for public models (Qwen). | |
| # Get your token: https://huggingface.co/settings/tokens | |
| HF_TOKEN= | |
| # ── Player Model (Trainable LLM) ───────────────────────────────────────────── | |
| # Model to train with RL. Must be a HuggingFace model ID. | |
| # Options: | |
| # Qwen/Qwen2.5-0.5B-Instruct — 0.5B, CPU-friendly (~1 GB) | |
| # Qwen/Qwen2.5-1.5B-Instruct — 1.5B, good balance (~3 GB) | |
| # Qwen/Qwen2.5-3B-Instruct — 3B, strong reasoning (~6 GB) | |
| # meta-llama/Llama-3.2-1B-Instruct — 1B Llama (needs HF_TOKEN) | |
| # meta-llama/Llama-3.2-3B-Instruct — 3B Llama (needs HF_TOKEN) | |
| # meta-llama/Llama-3.1-8B-Instruct — 8B Llama (needs GPU + HF_TOKEN) | |
| PLAYER_MODEL=Qwen/Qwen2.5-0.5B-Instruct | |
| # ── Backend Server ──────────────────────────────────────────────────────────── | |
| BACKEND_PORT=8008 | |
| BACKEND_HOST=0.0.0.0 | |
| BACKEND_WORKERS=2 | |
| LOG_LEVEL=info | |
| # ── Frontend ────────────────────────────────────────────────────────────────── | |
| # URL of the backend WebSocket (used at runtime in the browser) | |
| VITE_WS_URL=ws://localhost:8008/ws | |
| VITE_API_URL=http://localhost:8008/api | |
| # ── RL Training Configuration ───────────────────────────────────────────────── | |
| RL_METHOD=grpo | |
| TRAIN_EVERY=5 | |
| TOTAL_GAMES=100 | |
| TRAIN_STEPS=200 | |
| DEVICE=cpu | |
| LEARNING_RATE=1e-5 | |
| BATCH_SIZE=4 | |
| NUM_GENERATIONS=4 | |
| # ── Economic Configuration ──────────────────────────────────────────────────── | |
| ENTRY_FEE=10.0 | |
| PRIZE_MULTIPLIER=0.9 | |
| INITIAL_WALLET=100.0 | |
| COACHING_FEE=5.0 | |
| MIN_WALLET_FOR_COACHING=15.0 | |
| # ── Complexity Thresholds (Claude activation gates) ─────────────────────────── | |
| COMPLEXITY_THRESHOLD_COMPLEX=0.45 | |
| COMPLEXITY_THRESHOLD_CRITICAL=0.70 | |
| MAX_COACHING_RATE=0.35 | |
| # ── Model Storage ───────────────────────────────────────────────────────────── | |
| MODEL_CACHE_DIR=./training/models | |
| CHECKPOINT_DIR=./training/checkpoints | |
| SELFPLAY_DATA_DIR=./training/data | |
| SKIP_MODEL_DOWNLOAD=false | |
| # ── Stockfish (Optional heuristic engine) ──────────────────────────────────── | |
| # STOCKFISH_PATH=/usr/games/stockfish | |
| STOCKFISH_DEPTH=10 | |
| # ── Nevermined (x402 cross-team agent-to-agent payments) ───────────────────── | |
| # Get your API key: https://nevermined.app | |
| # Run python -m backend.economy.register_agent to populate NVM_PLAN_ID + NVM_AGENT_ID | |
| NVM_API_KEY= | |
| NVM_ENVIRONMENT=sandbox | |
| NVM_PLAN_ID= | |
| NVM_AGENT_ID= | |
| # Port for the Nevermined payments service (separate from BACKEND_PORT above) | |
| PORT=8006 | |
| # Publicly reachable URL of this backend (e.g. your ngrok tunnel) — replace with your own | |
| CHESSECON_API_URL=https://viva-overbold-sherry.ngrok-free.dev | |
| # ── Cross-team coaching (subscriber side) ───────────────────────────────────── | |
| # Set these to call another team's /api/chess/analyze endpoint | |
| EXTERNAL_COACHING_URL= | |
| EXTERNAL_NVM_PLAN_ID= | |
| EXTERNAL_NVM_AGENT_ID= | |
| EXTERNAL_COACHING_BUDGET=50.0 | |
| MIN_WALLET_FOR_EXTERNAL=20.0 | |