File size: 4,355 Bytes
dc9131f
 
 
beaa858
 
048b978
d4ca08a
 
 
e2a0fa2
 
048b978
 
 
 
d4ca08a
 
 
e2a0fa2
 
 
 
d4ca08a
e2a0fa2
 
d4ca08a
 
048b978
 
 
 
 
 
5499dd2
048b978
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
d4ca08a
 
 
e2a0fa2
048b978
e2a0fa2
d4ca08a
beaa858
 
 
d4ca08a
beaa858
dc9131f
6fe25e1
dc9131f
 
b2eb544
 
0ac036c
b2eb544
0d141d4
 
 
 
 
b2eb544
0d141d4
 
 
b2eb544
 
 
 
 
 
 
beaa858
dc9131f
 
 
 
f301510
dc9131f
beaa858
 
 
 
 
 
 
b2eb544
6fe25e1
 
dc9131f
 
 
beaa858
d4ca08a
dc9131f
 
 
 
b2eb544
dc9131f
048b978
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
#!/usr/bin/env bash
# Container entrypoint: writes OpenClaw + LiteLLM configuration, optionally
# starts a local LiteLLM proxy, then execs the OpenClaw gateway on port 7860.
#
# Environment:
#   OPENCLAW_GATEWAY_TOKEN  gateway auth token (default: openclaw-hf-default-token)
#   LITELLM_API_KEY         upstream provider API key (enables the proxy)
#   LITELLM_MODEL           provider-prefixed model id (enables the proxy)
#   LITELLM_API_BASE        optional custom upstream base URL
set -euo pipefail

# PID of the background LiteLLM proxy; empty until (and unless) it is started.
# Initialized here so cleanup() is safe under `set -u` on any exit path.
LITELLM_PID=""

# ── 0. Write openclaw.json ─────────────────────────────────────────────────────
OPENCLAW_CONFIG_DIR="${HOME}/.openclaw"
mkdir -p "$OPENCLAW_CONFIG_DIR"

GATEWAY_TOKEN="${OPENCLAW_GATEWAY_TOKEN:-openclaw-hf-default-token}"

# Register LiteLLM proxy as a custom provider directly in openclaw.json.
# apiKey goes in models.providers (not auth-profiles.json) for custom providers.
# agents.defaults.model points to litellm/default so OpenClaw stops trying Anthropic.
#
# The heredoc delimiter is deliberately UNQUOTED so ${GATEWAY_TOKEN} and
# ${LITELLM_MODEL:-custom} expand into the JSON below.
# NOTE(review): the expansions are not JSON-escaped — a token or model name
# containing `"` or `\` would produce invalid JSON. Assumed safe for the
# expected token/model alphabet; confirm if these values can be arbitrary.
cat > "${OPENCLAW_CONFIG_DIR}/openclaw.json" << OPENCLAW_JSON
{
  "gateway": {
    "bind": "lan",
    "auth": {
      "mode": "token",
      "token": "${GATEWAY_TOKEN}"
    },
    "controlUi": {
      "allowInsecureAuth": true,
      "dangerouslyDisableDeviceAuth": true,
      "dangerouslyAllowHostHeaderOriginFallback": true
    }
  },
  "models": {
    "providers": {
      "litellm": {
        "baseUrl": "http://127.0.0.1:4000",
        "apiKey": "litellm-proxy",
        "api": "openai-completions",
        "models": [
          {
            "id": "default",
            "name": "LiteLLM Proxy (${LITELLM_MODEL:-custom})",
            "contextWindow": 200000,
            "maxTokens": 8192,
            "input": ["text", "image"],
            "reasoning": false
          }
        ]
      }
    }
  },
  "agents": {
    "defaults": {
      "model": "litellm/default"
    }
  }
}
OPENCLAW_JSON

# Only the first 8 chars of the token are logged, to avoid leaking the secret.
echo "[start.sh] openclaw.json written (provider=litellm, token=${GATEWAY_TOKEN:0:8}...)"
echo "[start.sh] Access UI at: https://<your-space>.hf.space/#token=${GATEWAY_TOKEN}"

# ── 1. Check if LiteLLM should be enabled ─────────────────────────────────────
# Both the upstream API key and the model id are required to run the proxy;
# when either is missing or empty, hand off to OpenClaw directly and never
# return (exec replaces this shell).
if [[ -z "${LITELLM_API_KEY:-}" || -z "${LITELLM_MODEL:-}" ]]; then
    echo "[start.sh] LITELLM_API_KEY or LITELLM_MODEL not set β€” starting OpenClaw without LiteLLM proxy"
    exec openclaw gateway --port 7860 --allow-unconfigured
fi

# ── 2. Write LiteLLM proxy config ─────────────────────────────────────────────
# Renders a minimal LiteLLM config mapping the single alias "default" to the
# upstream model named by LITELLM_MODEL, with retries and a request timeout.
LITELLM_CONFIG=/tmp/litellm_config.yaml

# The config embeds LITELLM_API_KEY, so create the file and restrict it to the
# owner BEFORE the secret is written (files in /tmp are world-readable by
# default under the usual 022 umask).
: > "$LITELLM_CONFIG"
chmod 600 "$LITELLM_CONFIG"

{
  echo "model_list:"
  echo "  - model_name: \"default\""
  echo "    litellm_params:"
  # Use LITELLM_MODEL directly β€” must include provider prefix, e.g.:
  # nvidia_nim/moonshotai/kimi-k2-instruct-0905  (NVIDIA NIM)
  # openai/gpt-4o                                (OpenAI)
  # anthropic/claude-sonnet-4-20250514           (Anthropic)
  echo "      model: \"${LITELLM_MODEL}\""
  echo "      api_key: \"${LITELLM_API_KEY}\""
  # api_base is optional; omit the key entirely when the env var is unset.
  if [ -n "${LITELLM_API_BASE:-}" ]; then
    echo "      api_base: \"${LITELLM_API_BASE}\""
  fi
  echo ""
  echo "litellm_settings:"
  echo "  drop_params: true"
  echo "  num_retries: 3"
  echo "  request_timeout: 120"
} > "$LITELLM_CONFIG"

echo "[start.sh] LiteLLM config written for model: ${LITELLM_MODEL}"

# ── 3. Start LiteLLM proxy in the background ──────────────────────────────────
# Define cleanup() and install the EXIT trap BEFORE launching the proxy, so
# there is no window in which a script failure could orphan the child process.
# cleanup() is a no-op while LITELLM_PID is still "" (set at the top of the
# script), and best-effort afterwards: kill failures on an already-dead pid
# are deliberately ignored.
cleanup() {
    if [ -n "$LITELLM_PID" ]; then
        echo "[start.sh] stopping LiteLLM (pid=$LITELLM_PID)"
        kill "$LITELLM_PID" 2>/dev/null || true
    fi
}
trap cleanup EXIT

litellm --config "$LITELLM_CONFIG" --port 4000 --host 127.0.0.1 &
LITELLM_PID=$!
echo "[start.sh] LiteLLM started (pid=$LITELLM_PID)"

# ── 4. Wait for LiteLLM to be healthy (max 60 s) ─────────────────────────────
# Poll the proxy's liveliness endpoint once per second. ("liveliness" is
# LiteLLM's actual endpoint spelling, not a typo.)
MAX_WAIT=60
WAITED=0
until curl -sf http://127.0.0.1:4000/health/liveliness > /dev/null 2>&1; do
    if [ "$WAITED" -ge "$MAX_WAIT" ]; then
        echo "[start.sh] WARNING: LiteLLM not healthy after ${MAX_WAIT}s β€” starting OpenClaw without proxy"
        # exec replaces this shell, so the EXIT trap will NOT fire β€” kill the
        # unhealthy proxy explicitly so it isn't left running behind OpenClaw.
        kill "$LITELLM_PID" 2>/dev/null || true
        exec openclaw gateway --port 7860 --allow-unconfigured
    fi
    sleep 1
    WAITED=$((WAITED + 1))
done
echo "[start.sh] LiteLLM healthy after ${WAITED}s"

# ── 5. Start OpenClaw ─────────────────────────────────────────────────────────
# Intentional: exec keeps the same PID, so the healthy LiteLLM proxy remains a
# child of the OpenClaw gateway process and serves it on 127.0.0.1:4000.
exec openclaw gateway --port 7860 --allow-unconfigured