proti0070 committed on
Commit
b07bb2c
·
verified ·
1 Parent(s): 7dbb323

Upload folder using huggingface_hub

Browse files
Files changed (4) hide show
  1. Dockerfile +5 -6
  2. requirements.txt +0 -1
  3. scripts/entrypoint.sh +0 -32
  4. scripts/sync_hf.py +2 -31
Dockerfile CHANGED
@@ -1,21 +1,20 @@
1
- # OpenClaw on Hugging Face Spaces — with Local Model Support (Ollama)
2
  # 优化点：node 用户构建（消除 chown）、合并 RUN 层（减少层开销）
3
  FROM node:22-bookworm
4
  SHELL ["/bin/bash", "-c"]
5
 
6
- # ── Layer 1 (root): 系统依赖 + Ollama + 工具（全部合并为一层）─────────────────
7
- RUN echo "[build][layer1] System deps + Ollama..." && START=$(date +%s) \
8
  && apt-get update \
9
  && apt-get install -y --no-install-recommends git ca-certificates curl python3 python3-pip patch \
10
  && rm -rf /var/lib/apt/lists/* \
11
  && pip3 install --no-cache-dir --break-system-packages huggingface_hub \
12
- && curl -fsSL https://ollama.com/install.sh | sh \
13
  && corepack enable \
14
  && mkdir -p /app \
15
  && chown node:node /app \
16
- && mkdir -p /home/node/.openclaw/workspace /home/node/.openclaw/credentials /home/node/.ollama \
17
  && chown -R node:node /home/node \
18
- && echo "[build][layer1] System deps + Ollama: $(($(date +%s) - START))s"
19
 
20
  # ── εˆ‡ζ’εˆ° node η”¨ζˆ·οΌˆεŽη»­ζ‰€ζœ‰ζ“δ½œιƒ½δ»₯ node θΊ«δ»½οΌŒζ— ιœ€ chown)───────────────
21
  USER node
 
1
+ # OpenClaw on Hugging Face Spaces — with Groq Support
2
  # 优化点：node 用户构建（消除 chown）、合并 RUN 层（减少层开销）
3
  FROM node:22-bookworm
4
  SHELL ["/bin/bash", "-c"]
5
 
6
+ # ── Layer 1 (root): 系统依赖 + 工具（全部合并为一层）─────────────────────────
7
+ RUN echo "[build][layer1] System deps + tools..." && START=$(date +%s) \
8
  && apt-get update \
9
  && apt-get install -y --no-install-recommends git ca-certificates curl python3 python3-pip patch \
10
  && rm -rf /var/lib/apt/lists/* \
11
  && pip3 install --no-cache-dir --break-system-packages huggingface_hub \
 
12
  && corepack enable \
13
  && mkdir -p /app \
14
  && chown node:node /app \
15
+ && mkdir -p /home/node/.openclaw/workspace /home/node/.openclaw/credentials \
16
  && chown -R node:node /home/node \
17
+ && echo "[build][layer1] System deps + tools: $(($(date +%s) - START))s"
18
 
19
  # ── εˆ‡ζ’εˆ° node η”¨ζˆ·οΌˆεŽη»­ζ‰€ζœ‰ζ“δ½œιƒ½δ»₯ node θΊ«δ»½οΌŒζ— ιœ€ chown)───────────────
20
  USER node
requirements.txt CHANGED
@@ -1,2 +1 @@
1
  huggingface_hub>=0.24.5 # Force rebuild 2026-02-11
2
- ollama>=0.1.0 # Ollama Python client for local model inference
 
1
  huggingface_hub>=0.24.5 # Force rebuild 2026-02-11
 
scripts/entrypoint.sh CHANGED
@@ -6,38 +6,6 @@ BOOT_START=$(date +%s)
6
  echo "[entrypoint] OpenClaw HuggingFace Spaces Entrypoint"
7
  echo "[entrypoint] ======================================="
8
 
9
- # ── Start Ollama Server (if enabled) ───────────────────────────────────
10
- if [ -n "$LOCAL_MODEL_ENABLED" ] && [ "$LOCAL_MODEL_ENABLED" = "true" ]; then
11
- echo "[entrypoint] Starting local model inference server..."
12
-
13
- export OLLAMA_HOST=0.0.0.0:11434
14
- export OLLAMA_MODELS=/home/node/.ollama/models
15
- export OLLAMA_NUM_PARALLEL=${OLLAMA_NUM_PARALLEL:-2}
16
- export OLLAMA_KEEP_ALIVE=${OLLAMA_KEEP_ALIVE:--1}
17
-
18
- # Start Ollama in background
19
- nohup ollama serve > /home/node/logs/ollama.log 2>&1 &
20
- OLLAMA_PID=$!
21
- echo "[entrypoint] Ollama server started (PID: $OLLAMA_PID)"
22
-
23
- # Wait for Ollama to be ready
24
- echo "[entrypoint] Waiting for Ollama to be ready..."
25
- for i in $(seq 1 30); do
26
- if curl -s http://localhost:11434/api/tags > /dev/null 2>&1; then
27
- echo "[entrypoint] Ollama is ready!"
28
- break
29
- fi
30
- sleep 1
31
- done
32
-
33
- # Pull model if specified
34
- if [ -n "$LOCAL_MODEL_NAME" ]; then
35
- echo "[entrypoint] Pulling model: $LOCAL_MODEL_NAME"
36
- ollama pull "$LOCAL_MODEL_NAME"
37
- echo "[entrypoint] Model pulled successfully!"
38
- fi
39
- fi
40
-
41
  # ── DNS pre-resolution (background — non-blocking) ───────────────────────
42
  # Resolves WhatsApp domains via DoH for dns-fix.cjs to consume.
43
  # Telegram connectivity is handled by API base auto-probe in sync_hf.py.
 
6
  echo "[entrypoint] OpenClaw HuggingFace Spaces Entrypoint"
7
  echo "[entrypoint] ======================================="
8
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9
  # ── DNS pre-resolution (background — non-blocking) ───────────────────────
10
  # Resolves WhatsApp domains via DoH for dns-fix.cjs to consume.
11
  # Telegram connectivity is handled by API base auto-probe in sync_hf.py.
scripts/sync_hf.py CHANGED
@@ -74,13 +74,6 @@ XAI_BASE_URL = os.environ.get("XAI_BASE_URL", "https://api.x.ai/v1")
74
  GROQ_API_KEY = os.environ.get("GROQ_API_KEY", "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")
75
  GROQ_BASE_URL = os.environ.get("GROQ_BASE_URL", "https://api.groq.com/openai/v1")
76
 
77
- # Local model inference (Ollama or compatible server)
78
- LOCAL_MODEL_ENABLED = os.environ.get("LOCAL_MODEL_ENABLED", "false").lower() in ("true", "1", "yes")
79
- LOCAL_MODEL_NAME = os.environ.get("LOCAL_MODEL_NAME", "neuralnexuslab/hacking:latest")
80
- LOCAL_MODEL_BASE_URL = os.environ.get("LOCAL_MODEL_BASE_URL", "http://localhost:11434/v1")
81
- LOCAL_MODEL_ID = os.environ.get("LOCAL_MODEL_ID", "neuralnexuslab/hacking")
82
- LOCAL_MODEL_NAME_DISPLAY = os.environ.get("LOCAL_MODEL_NAME_DISPLAY", "NeuralNexus HacKing 0.6B")
83
-
84
  # Gateway token (default: huggingclaw; override via GATEWAY_TOKEN env var)
85
  GATEWAY_TOKEN = os.environ.get("GATEWAY_TOKEN", "huggingclaw")
86
 
@@ -91,8 +84,6 @@ OPENCLAW_DEFAULT_MODEL = os.environ.get("OPENCLAW_DEFAULT_MODEL") or (
91
  "xai/grok-beta" if XAI_API_KEY else
92
  "openai/gpt-5-nano" if OPENAI_API_KEY else "openrouter/openai/gpt-oss-20b:free"
93
  )
94
-
95
- # HF Spaces built-in env vars (auto-set by HF runtime)
96
  SPACE_HOST = os.environ.get("SPACE_HOST", "") # e.g. "tao-shen-huggingclaw.hf.space"
97
  SPACE_ID = os.environ.get("SPACE_ID", "") # e.g. "tao-shen/HuggingClaw"
98
 
@@ -525,28 +516,8 @@ class OpenClawFullSync:
525
  data["agents"]["defaults"]["model"]["primary"] = "groq/llama-3.3-70b-versatile"
526
  print("[SYNC] Llama 3.3 70B set as default model")
527
 
528
- # Local model provider (Ollama or compatible)
529
- if LOCAL_MODEL_ENABLED:
530
- data["models"]["providers"]["local"] = {
531
- "baseUrl": LOCAL_MODEL_BASE_URL,
532
- "apiKey": "ollama",
533
- "api": "openai-completions",
534
- "models": [
535
- {
536
- "id": LOCAL_MODEL_ID,
537
- "name": LOCAL_MODEL_NAME_DISPLAY
538
- }
539
- ]
540
- }
541
- print(f"[SYNC] Set local model provider ({LOCAL_MODEL_BASE_URL})")
542
-
543
- # Set as default if no other API keys are set
544
- if not OPENAI_API_KEY and not OPENROUTER_API_KEY:
545
- data["agents"]["defaults"]["model"]["primary"] = f"local/{LOCAL_MODEL_ID}"
546
- print(f"[SYNC] Set local model as default: {LOCAL_MODEL_ID}")
547
-
548
- if not OPENAI_API_KEY and not OPENROUTER_API_KEY and not LOCAL_MODEL_ENABLED and not XAI_API_KEY and not GROQ_API_KEY:
549
- print("[SYNC] WARNING: No API key set (OPENAI/OPENROUTER/GROQ/XAI/LOCAL), LLM features will not work")
550
  data["models"]["providers"].pop("gemini", None)
551
  data["agents"]["defaults"]["model"]["primary"] = OPENCLAW_DEFAULT_MODEL
552
 
 
74
  GROQ_API_KEY = os.environ.get("GROQ_API_KEY", "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")
75
  GROQ_BASE_URL = os.environ.get("GROQ_BASE_URL", "https://api.groq.com/openai/v1")
76
 
 
 
 
 
 
 
 
77
  # Gateway token (default: huggingclaw; override via GATEWAY_TOKEN env var)
78
  GATEWAY_TOKEN = os.environ.get("GATEWAY_TOKEN", "huggingclaw")
79
 
 
84
  "xai/grok-beta" if XAI_API_KEY else
85
  "openai/gpt-5-nano" if OPENAI_API_KEY else "openrouter/openai/gpt-oss-20b:free"
86
  )
 
 
87
  SPACE_HOST = os.environ.get("SPACE_HOST", "") # e.g. "tao-shen-huggingclaw.hf.space"
88
  SPACE_ID = os.environ.get("SPACE_ID", "") # e.g. "tao-shen/HuggingClaw"
89
 
 
516
  data["agents"]["defaults"]["model"]["primary"] = "groq/llama-3.3-70b-versatile"
517
  print("[SYNC] Llama 3.3 70B set as default model")
518
 
519
+ if not OPENAI_API_KEY and not OPENROUTER_API_KEY and not XAI_API_KEY and not GROQ_API_KEY:
520
+ print("[SYNC] WARNING: No API key set (OPENAI/OPENROUTER/GROQ/XAI), LLM features will not work")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
521
  data["models"]["providers"].pop("gemini", None)
522
  data["agents"]["defaults"]["model"]["primary"] = OPENCLAW_DEFAULT_MODEL
523