# ═══════════════════════════════════════════════════════════════════════════ # HuggingClaw — Environment Configuration # Deploy OpenClaw on HuggingFace Spaces # ═══════════════════════════════════════════════════════════════════════════ # # Usage: # Local Docker → cp .env.example .env → fill in values → docker run --env-file .env # HF Spaces → Set each variable as a "Repository Secret" in Space Settings # # Legend: # [REQUIRED] Must be set, or data persistence will not work # [RECOMMENDED] Strongly recommended for production use # [OPTIONAL] Fine-tune behavior; safe to leave empty # # ═══════════════════════════════════════════════════════════════════════════ # ─── SECURITY ───────────────────────────────────────────────────────────── # # Password for the Control UI dashboard. # Visitors can see the UI, but only users with this password can connect # and control the OpenClaw instance (manage agents, plugins, settings). # # [RECOMMENDED] Default: huggingclaw # OPENCLAW_PASSWORD=huggingclaw # ─── DATA PERSISTENCE ──────────────────────────────────────────────────── # # HuggingClaw auto-syncs the ~/.openclaw directory to a private HF Dataset # repo, so your conversations, settings, and credentials survive restarts. # Without these two variables, all data is lost when the Space restarts. # # HuggingFace Access Token with WRITE permission. # Create one at: https://huggingface.co/settings/tokens # Scopes needed: read + write access to your repos. # # [REQUIRED] # HF_TOKEN=hf_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx # Target Dataset repository for data backup. # Format: your-username/repo-name # Example: your-name/YourSpace-data # # Manual mode (default): create the repo yourself, then set this variable. # Auto mode (AUTO_CREATE_DATASET=true): if not set, HuggingClaw derives # it from your HF_TOKEN username → "your-username/HuggingClaw-data". 
# # [REQUIRED in manual mode, OPTIONAL in auto mode] # OPENCLAW_DATASET_REPO=your-username/HuggingClaw-data # Whether to auto-create the Dataset repo if it doesn't exist. # When true: HuggingClaw creates a PRIVATE dataset repo on first startup. # If OPENCLAW_DATASET_REPO is not set, the repo name is auto-derived # from your HF_TOKEN username (e.g. "your-username/HuggingClaw-data"). # When false (default): you must create the repo manually on HuggingFace # and set OPENCLAW_DATASET_REPO yourself. # # [OPTIONAL] Default: false # # AUTO_CREATE_DATASET=false # How often (in seconds) to back up data to the Dataset repo. # Lower values = safer but more API calls to HuggingFace. # # [OPTIONAL] Default: 60 # # SYNC_INTERVAL=60 # ─── LLM / OPENAI-COMPATIBLE API ─────────────────────────────────────────── # # OpenClaw supports any OpenAI-compatible API. Set the API key for the # provider(s) you use. See OpenClaw docs: https://openclawdoc.com/docs/reference/environment-variables # # OpenAI (or any OpenAI-compatible endpoint) # Use OPENAI_API_KEY alone for api.openai.com, or set OPENAI_BASE_URL for # compatible endpoints (e.g. OpenRouter, local LLM servers, Azure OpenAI). # # [RECOMMENDED] At least one of the following for AI conversations # OPENAI_API_KEY=sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx # Optional: base URL for OpenAI-compatible API (default: https://api.openai.com/v1) # Examples: https://openrouter.ai/api/v1, http://localhost:11434/v1 (Ollama), etc. # # OPENAI_BASE_URL=https://api.openai.com/v1 # OpenRouter — one key, 200+ models, free tier: https://openrouter.ai/keys # OPENROUTER_API_KEY=sk-or-v1-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx # Other providers (OpenClaw reads these from the environment) # # ANTHROPIC_API_KEY=sk-ant-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx # GOOGLE_API_KEY=AIza... # MISTRAL_API_KEY=mis-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx # COHERE_API_KEY=co-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx # Default model for new conversations (must exist in your configured provider). 
# # [OPTIONAL] Examples: openai/gpt-5-nano, openrouter/deepseek/deepseek-chat:free # # OPENCLAW_DEFAULT_MODEL=openai/gpt-5-nano # ─── LOCAL MODEL INFERENCE (Ollama) ──────────────────────────────────── # Run small models (≤1B) locally on CPU - perfect for HF Spaces free tier! # Models are stored in ~/.ollama and persisted via HF Dataset sync. # For NeuralNexusLab/HacKing 0.6B or other lightweight models. # # Enable local model inference # [OPTIONAL] Default: false # # LOCAL_MODEL_ENABLED=true # Model to pull from Ollama library or HuggingFace # Format: model_name (e.g., neuralnexuslab/hacking, llama3.1:8b, qwen2.5:7b) # For HF models: use hf.co/username/modelname format # [OPTIONAL] Example: neuralnexuslab/hacking # # LOCAL_MODEL_NAME=neuralnexuslab/hacking # Ollama API base URL (internal container network) # [OPTIONAL] Default: http://localhost:11434/v1 # # LOCAL_MODEL_BASE_URL=http://localhost:11434/v1 # Model ID as it appears in OpenClaw (matches Ollama model name) # [OPTIONAL] Default: neuralnexuslab/hacking # # LOCAL_MODEL_ID=neuralnexuslab/hacking # Display name in Control UI model selector # [OPTIONAL] Default: NeuralNexus HacKing 0.6B # # LOCAL_MODEL_NAME_DISPLAY=NeuralNexus HacKing 0.6B # Ollama server settings # [OPTIONAL] Default: 2 (good for 0.6B models on CPU) # # OLLAMA_NUM_PARALLEL=2 # Keep model loaded in memory (-1 = forever, 5m = 5 minutes) # [OPTIONAL] Default: -1 (always loaded) # # OLLAMA_KEEP_ALIVE=-1 # Ollama models directory (persisted across restarts) # [OPTIONAL] Default: ~/.ollama/models # # OLLAMA_MODELS=/home/node/.ollama/models # ─── PERFORMANCE ────────────────────────────────────────────────────────── # # Node.js heap memory limit in MB. # HF free tier provides 16 GB RAM. Default 512 MB is enough for most cases. # Increase if you run complex agent workflows or handle large conversations. 
# # [OPTIONAL] Default: 512 # # NODE_MEMORY_LIMIT=512 # ─── LOCALE ─────────────────────────────────────────────────────────────── # # Timezone for log timestamps and scheduled tasks. # # [OPTIONAL] Default: UTC # # TZ=Asia/Shanghai # ─── OPENCLAW (from official docs) ───────────────────────────────────────── # # Optional overrides. See https://openclawdoc.com/docs/reference/environment-variables # # OPENCLAW_HOME=~/.openclaw # OPENCLAW_STATE_DIR=~/.openclaw # OPENCLAW_CONFIG_PATH=~/.openclaw/openclaw.json # OPENCLAW_LOG_LEVEL=info # OPENCLAW_API_PORT=8080 # OPENCLAW_WS_PORT=8081 # OPENCLAW_HOST=0.0.0.0 # OLLAMA_HOST=http://localhost:11434 # OPENCLAW_HTTP_PROXY= # OPENCLAW_HTTPS_PROXY= # OPENCLAW_NO_PROXY= # ═══════════════════════════════════════════════════════════════════════════ # OpenClaw 官方环境变量 # ═══════════════════════════════════════════════════════════════════════════ # # HuggingClaw 启动 OpenClaw 时透传整个环境(env=os.environ.copy()), # 因此 OpenClaw 官方文档中列出的 **所有** 环境变量在 HF Spaces / Docker 中 # 设置后均可直接生效。 # 官方完整列表见:https://openclawdoc.com/docs/reference/environment-variables # # 常见类别(仅列举部分): # API Keys: OPENAI_API_KEY, ANTHROPIC_API_KEY, GOOGLE_API_KEY, MISTRAL_API_KEY, # COHERE_API_KEY, OPENROUTER_API_KEY # Server: OPENCLAW_API_PORT, OPENCLAW_WS_PORT, OPENCLAW_METRICS_PORT, # OPENCLAW_HOST, OPENCLAW_TLS_* # App: OPENCLAW_CONFIG, OPENCLAW_DATA_DIR, OPENCLAW_LOG_LEVEL, # OPENCLAW_LOG_FORMAT, OPENCLAW_LOG_FILE, OPENCLAW_ENV # Memory: OPENCLAW_MEMORY_BACKEND, OPENCLAW_REDIS_URL, OPENCLAW_SQLITE_PATH # Network: OPENCLAW_HTTP_PROXY, OPENCLAW_HTTPS_PROXY, OPENCLAW_NO_PROXY, # OPENCLAW_OUTBOUND_MODE # Secrets: OPENCLAW_SECRETS_BACKEND, OPENCLAW_SECRETS_KEY, VAULT_ADDR, VAULT_TOKEN # Ollama: OLLAMA_HOST, OLLAMA_NUM_PARALLEL, OLLAMA_KEEP_ALIVE # Browser: OPENCLAW_BROWSER_EXECUTABLE, OPENCLAW_BROWSER_HEADLESS # # ═══════════════════════════════════════════════════════════════════════════ # # ═══════════════════════════════════════════════════════════════════════════ # 
HuggingClaw 新增变量一览(仅本仓库脚本使用) # ═══════════════════════════════════════════════════════════════════════════ # # ─── 安全 / 控制台 ─────────────────────────────────────────────────────── # OPENCLAW_PASSWORD [推荐] 控制台密码,未设则默认 huggingclaw # # ─── 持久化 (HuggingFace Dataset) ─────────────────────────────────────── # HF_TOKEN [必填] HF 访问令牌,需具备写入权限 # OPENCLAW_DATASET_REPO [必填*] 备份用 Dataset 仓库,如 your-name/HuggingClaw-data(手动模式必填;AUTO_CREATE_DATASET=true 时可省略,将按 HF_TOKEN 用户名自动推导) # AUTO_CREATE_DATASET [可选] 是否自动创建仓库,默认 false(安全考虑) # SYNC_INTERVAL [可选] 备份间隔(秒),默认 60 # HF_HUB_DOWNLOAD_TIMEOUT [可选] 下载超时(秒),默认 300 # HF_HUB_UPLOAD_TIMEOUT [可选] 上传超时(秒),默认 600 # # ─── LLM / 对话 API(至少配置其一以启用 AI 对话)──────────────────────── # OPENAI_API_KEY [推荐] OpenAI 或兼容端点 API Key # OPENAI_BASE_URL [可选] 兼容 API 基地址,默认 https://api.openai.com/v1 # OPENROUTER_API_KEY [可选] OpenRouter,200+ 模型、免费额度 # ANTHROPIC_API_KEY [可选] Anthropic Claude # GOOGLE_API_KEY [可选] Google / Gemini # MISTRAL_API_KEY [可选] Mistral # COHERE_API_KEY [可选] Cohere # OPENCLAW_DEFAULT_MODEL [可选] 默认模型 ID # # ─── 消息渠道 ───────────────────────────────────────────────────────── # Telegram、WhatsApp 等消息渠道均可在 Control UI 中配置,无需环境变量。 # # ─── HuggingFace Spaces 运行时(HF 自动注入,一般无需手动设)──────────── # SPACE_HOST 当前 Space 域名,如 xxx.hf.space # SPACE_ID 仓库 ID,如 username/HuggingClaw # # ─── 性能与运行 ─────────────────────────────────────────────────────── # NODE_MEMORY_LIMIT [可选] Node 堆内存上限(MB),默认 512 # TZ [可选] 时区,如 Asia/Shanghai # # ═══════════════════════════════════════════════════════════════════════════