Spaces:
Runtime error
Runtime error
feat: chat log panel + improved conversation orchestrator
Browse files- Add /api/chatlog GET/POST endpoints for conversation display
- Add chat log panel at bottom of Office frontend (Adam ↔ Eve dialogue)
- Rewrite conversation-loop.py with history context for coherent dialogue
- Frontend polls /api/chatlog every 5s and renders messages
- Remove old bash conversation script
- .claude/scheduled_tasks.lock +1 -0
- frontend/electron-standalone.html +63 -1
- marketing.md +708 -0
- scripts/conversation-loop.py +119 -0
- scripts/conversation-loop.sh +0 -89
- scripts/token-redirect.cjs +28 -0
.claude/scheduled_tasks.lock
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
{"sessionId":"9cf03742-3f47-4a01-bbe9-989dff2c6421","pid":64066,"acquiredAt":1773282355683}
|
frontend/electron-standalone.html
CHANGED
|
@@ -417,7 +417,41 @@
|
|
| 417 |
max-width: none;
|
| 418 |
justify-content: flex-start;
|
| 419 |
margin-top: 20px;
|
|
|
|
| 420 |
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 421 |
#game-container {
|
| 422 |
position: relative;
|
| 423 |
border: 0;
|
|
@@ -1650,8 +1684,16 @@
|
|
| 1650 |
</div>
|
| 1651 |
</div>
|
| 1652 |
</div>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1653 |
</div>
|
| 1654 |
-
|
| 1655 |
<div id="asset-highlight"></div>
|
| 1656 |
<div id="room-loading-overlay" aria-live="polite" aria-busy="true">
|
| 1657 |
<div class="room-loading-inner">
|
|
@@ -5492,6 +5534,26 @@ function toggleBrokerPanel() {
|
|
| 5492 |
return s;
|
| 5493 |
}
|
| 5494 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 5495 |
function fetchStatus() {
|
| 5496 |
return fetch('/status', { cache: 'no-store' })
|
| 5497 |
.then(response => response.json())
|
|
|
|
| 417 |
max-width: none;
|
| 418 |
justify-content: flex-start;
|
| 419 |
margin-top: 20px;
|
| 420 |
+
flex-wrap: wrap;
|
| 421 |
}
|
| 422 |
+
#chatlog-panel {
|
| 423 |
+
width: 100%;
|
| 424 |
+
max-height: 300px;
|
| 425 |
+
background: #1a1d27;
|
| 426 |
+
border: 4px solid #0e1119;
|
| 427 |
+
padding: 12px 16px;
|
| 428 |
+
display: flex;
|
| 429 |
+
flex-direction: column;
|
| 430 |
+
overflow: hidden;
|
| 431 |
+
font-family: 'ArkPixel', monospace;
|
| 432 |
+
}
|
| 433 |
+
#chatlog-title {
|
| 434 |
+
font-size: 14px;
|
| 435 |
+
color: #e0c97f;
|
| 436 |
+
margin-bottom: 8px;
|
| 437 |
+
text-align: center;
|
| 438 |
+
}
|
| 439 |
+
#chatlog-content {
|
| 440 |
+
flex: 1;
|
| 441 |
+
overflow-y: auto;
|
| 442 |
+
font-size: 13px;
|
| 443 |
+
line-height: 1.6;
|
| 444 |
+
color: #d1d5db;
|
| 445 |
+
}
|
| 446 |
+
#chatlog-content .chat-msg {
|
| 447 |
+
margin-bottom: 4px;
|
| 448 |
+
padding: 2px 0;
|
| 449 |
+
}
|
| 450 |
+
#chatlog-content .chat-msg .chat-speaker {
|
| 451 |
+
font-weight: bold;
|
| 452 |
+
}
|
| 453 |
+
#chatlog-content .chat-msg .chat-speaker.adam { color: #f87171; }
|
| 454 |
+
#chatlog-content .chat-msg .chat-speaker.eve { color: #a78bfa; }
|
| 455 |
#game-container {
|
| 456 |
position: relative;
|
| 457 |
border: 0;
|
|
|
|
| 1684 |
</div>
|
| 1685 |
</div>
|
| 1686 |
</div>
|
| 1687 |
+
|
| 1688 |
+
<!-- Chat Log 面板 -->
|
| 1689 |
+
<div id="chatlog-panel">
|
| 1690 |
+
<div id="chatlog-title">🦞 Adam ↔ Eve 对话</div>
|
| 1691 |
+
<div id="chatlog-content">
|
| 1692 |
+
<div style="color:#9ca3af;font-size:12px;text-align:center;padding:20px 0;">等待对话开始...</div>
|
| 1693 |
+
</div>
|
| 1694 |
+
</div>
|
| 1695 |
</div>
|
| 1696 |
+
|
| 1697 |
<div id="asset-highlight"></div>
|
| 1698 |
<div id="room-loading-overlay" aria-live="polite" aria-busy="true">
|
| 1699 |
<div class="room-loading-inner">
|
|
|
|
| 5534 |
return s;
|
| 5535 |
}
|
| 5536 |
|
| 5537 |
+
// Number of messages rendered in the last poll; used to skip redundant
// DOM re-renders when nothing new has arrived.
let lastChatlogLen = 0;

// Escape untrusted text before it is interpolated into innerHTML.
// Chat messages come from the /api/chatlog backend and may contain
// arbitrary characters; without this, a message containing markup
// would be injected into the page (XSS).
function escapeChatlogHtml(value) {
  return String(value)
    .replaceAll('&', '&amp;')
    .replaceAll('<', '&lt;')
    .replaceAll('>', '&gt;')
    .replaceAll('"', '&quot;');
}

/**
 * Poll /api/chatlog and re-render the chat panel when new messages arrive.
 * The `t` query parameter plus `cache: 'no-store'` defeat any caching layer.
 * Errors are swallowed deliberately: this is a best-effort poll that will
 * simply retry on the next interval tick.
 */
function fetchChatlog() {
  fetch('/api/chatlog?t=' + Date.now(), { cache: 'no-store' })
    .then((r) => r.json())
    .then((data) => {
      const msgs = data.messages || [];
      // NOTE(review): length-only change detection misses in-place edits
      // that keep the count constant — acceptable for an append-only log.
      if (msgs.length === lastChatlogLen) return;
      lastChatlogLen = msgs.length;
      const el = document.getElementById('chatlog-content');
      if (!el) return;
      el.innerHTML = msgs.map((m) => {
        const speaker = m.speaker || '';
        // Escape both the class token and the displayed text: the speaker
        // string is server-controlled and lands inside an HTML attribute.
        const cls = escapeChatlogHtml(speaker.toLowerCase());
        const name = escapeChatlogHtml(speaker);
        const text = escapeChatlogHtml(m.text || '');
        return `<div class="chat-msg"><span class="chat-speaker ${cls}">${name}:</span> ${text}</div>`;
      }).join('');
      // Keep the newest message scrolled into view.
      el.scrollTop = el.scrollHeight;
    })
    .catch(() => {}); // best-effort poll; retried on next interval
}
setInterval(fetchChatlog, 5000);
|
| 5556 |
+
|
| 5557 |
function fetchStatus() {
|
| 5558 |
return fetch('/status', { cache: 'no-store' })
|
| 5559 |
.then(response => response.json())
|
marketing.md
ADDED
|
@@ -0,0 +1,708 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# HuggingClaw Reddit Marketing Playbook
|
| 2 |
+
|
| 3 |
+
# HuggingClaw Reddit 营销推广方案
|
| 4 |
+
|
| 5 |
+
---
|
| 6 |
+
|
| 7 |
+
> **Core Value Proposition / 核心价值主张:**
|
| 8 |
+
> Deploy a fully-featured, multi-channel AI assistant on HuggingFace Spaces — for free, forever. WhatsApp + Telegram + 40 LLM providers, with bulletproof data persistence.
|
| 9 |
+
>
|
| 10 |
+
> 在 HuggingFace Spaces 上免费部署一个功能完备的多渠道 AI 助手——永久免费。支持 WhatsApp + Telegram + 40+ LLM 供应商,数据永不丢失。
|
| 11 |
+
|
| 12 |
+
---
|
| 13 |
+
|
| 14 |
+
## Marketing Principles / 营销原则
|
| 15 |
+
|
| 16 |
+
<!--
|
| 17 |
+
Reddit 用户极度反感硬广。以下所有文案均遵循:
|
| 18 |
+
1. Value-First(价值先行):先给社区带来干货,再引出项目
|
| 19 |
+
2. Story-Driven(故事驱动):用真实的痛点和解决过程引发共鸣
|
| 20 |
+
3. Technical Credibility(技术可信度):用具体的技术细节建立信任
|
| 21 |
+
4. Community Tone(社区语气):像一个兴奋的开发者在分享,而非营销人员在推销
|
| 22 |
+
5. CTA Soft Landing(软着陆号召):以 "希望对你有用" 而非 "快来用我的产品" 收尾
|
| 23 |
+
-->
|
| 24 |
+
|
| 25 |
+
---
|
| 26 |
+
|
| 27 |
+
## Plan 1: r/selfhosted — The "Zero-Cost Always-On" Angle
|
| 28 |
+
|
| 29 |
+
## 方案一:r/selfhosted — "零成本永不宕机" 切入角度
|
| 30 |
+
|
| 31 |
+
**Why this subreddit / 为什么选这个社区:**
|
| 32 |
+
r/selfhosted (1.5M+ members) obsesses over self-hosting solutions that minimize cost and maximize uptime. HuggingClaw's free-tier deployment on HF Spaces directly hits this community's sweet spot.
|
| 33 |
+
|
| 34 |
+
r/selfhosted(150 万+成员)痴迷于低成本、高可用的自托管方案。HuggingClaw 在 HF Spaces 免费层上的部署方式直击该社区的核心需求。
|
| 35 |
+
|
| 36 |
+
**Marketing Technique / 营销技巧:**
|
| 37 |
+
Problem-Agitation-Solution (PAS) — Surface a pain point the audience already feels, amplify it, then present the solution.
|
| 38 |
+
|
| 39 |
+
问题-激化-解决(PAS)框架——先揭示受众已有的痛点,放大它,再呈现解决方案。
|
| 40 |
+
|
| 41 |
+
---
|
| 42 |
+
|
| 43 |
+
### Title / 标题
|
| 44 |
+
|
| 45 |
+
```
|
| 46 |
+
I got tired of paying $20/month for a chatbot server, so I made my AI assistant run on HuggingFace Spaces for $0 — with WhatsApp & Telegram built in
|
| 47 |
+
```
|
| 48 |
+
|
| 49 |
+
> 我厌倦了每月为聊天机器人服务器付 20 美元,所以我让我的 AI 助手在 HuggingFace Spaces 上以 0 美元运行——还内置了 WhatsApp 和 Telegram
|
| 50 |
+
|
| 51 |
+
### Body / 正文
|
| 52 |
+
|
| 53 |
+
```
|
| 54 |
+
Hey r/selfhosted,
|
| 55 |
+
|
| 56 |
+
Like many of you, I've been running my own AI assistant for a while. The problem?
|
| 57 |
+
Even a small VPS costs $15-20/month, and I still had to babysit uptime, deal with
|
| 58 |
+
DNS issues, and pray my data survives a reboot.
|
| 59 |
+
|
| 60 |
+
So I built HuggingClaw — a project that deploys OpenClaw (open-source AI assistant
|
| 61 |
+
framework) on HuggingFace Spaces' free tier. Here's what you get for $0:
|
| 62 |
+
|
| 63 |
+
**What it does:**
|
| 64 |
+
- 🔧 One-click deploy — just duplicate a HF Space and set 2 secrets
|
| 65 |
+
- 💬 WhatsApp + Telegram integration that actually works (solved HF's DNS blocking
|
| 66 |
+
with DNS-over-HTTPS fallback)
|
| 67 |
+
- 🧠 Connect any LLM: OpenAI, Claude, Gemini, OpenRouter (200+ free models), or
|
| 68 |
+
your own Ollama instance
|
| 69 |
+
- 💾 Automatic data persistence — your conversations, credentials, and settings
|
| 70 |
+
survive container restarts via atomic backups to a private HF Dataset repo
|
| 71 |
+
- 🔒 Token-based gateway auth, no credentials exposed to browser
|
| 72 |
+
|
| 73 |
+
**The hard part I solved so you don't have to:**
|
| 74 |
+
HuggingFace Spaces blocks DNS for WhatsApp and Telegram domains. I implemented a
|
| 75 |
+
full DNS-over-HTTPS resolver (Cloudflare + Google DoH) with Node.js dns.lookup
|
| 76 |
+
monkey-patching, plus a Telegram API proxy that intercepts fetch() calls and
|
| 77 |
+
redirects to working mirrors. Your WhatsApp QR login session persists across
|
| 78 |
+
restarts too — no re-scanning needed.
|
| 79 |
+
|
| 80 |
+
**Stack:** Docker + Node.js + Python sync daemon | 2 vCPU + 16GB RAM on HF free tier
|
| 81 |
+
|
| 82 |
+
It's fully open-source (MIT). Would love feedback from this community — you folks
|
| 83 |
+
always find the edge cases I miss.
|
| 84 |
+
|
| 85 |
+
GitHub: [link]
|
| 86 |
+
Live demo: [link]
|
| 87 |
+
|
| 88 |
+
Happy to answer any questions about the architecture or deployment process.
|
| 89 |
+
```
|
| 90 |
+
|
| 91 |
+
> 嘿 r/selfhosted,
|
| 92 |
+
>
|
| 93 |
+
> 和你们很多人一样,我一直在运行自己的 AI 助手。问题是?即使是最小的 VPS 也要每月 15-20 美元,我还得操心正常运行时间、处理 DNS 问题,并祈祷数据能在重启后幸存。
|
| 94 |
+
>
|
| 95 |
+
> 所以我构建了 HuggingClaw——一个在 HuggingFace Spaces 免费层上部署 OpenClaw(开源 AI 助手框架)的项目。以下是你 0 美元能得到的:
|
| 96 |
+
>
|
| 97 |
+
> **功能亮点:**
|
| 98 |
+
> - 一键部署——只需复制一个 HF Space 并设置 2 个密钥
|
| 99 |
+
> - WhatsApp + Telegram 集成,真的能用(通过 DNS-over-HTTPS 回退解决了 HF 的 DNS 封锁)
|
| 100 |
+
> - 连接任何 LLM:OpenAI、Claude、Gemini、OpenRouter(200+免费模型),或你自己的 Ollama 实例
|
| 101 |
+
> - 自动数据持久化——你的对话、凭证和设置通过原子备份到私有 HF Dataset 仓库,在容器重启后依然存在
|
| 102 |
+
> - 基于令牌的网关认证,浏览器端不暴露任何凭证
|
| 103 |
+
>
|
| 104 |
+
> **我替你解决了最难的部分:**
|
| 105 |
+
> HuggingFace Spaces 封锁了 WhatsApp 和 Telegram 的 DNS。我实现了完整的 DNS-over-HTTPS 解析器(Cloudflare + Google DoH),通过 Node.js dns.lookup 猴子补丁,加上一个 Telegram API 代理来拦截 fetch() 调用并重定向到可用的镜像。你的 WhatsApp QR 登录会话在重启后也会保留——不需要重新扫码。
|
| 106 |
+
>
|
| 107 |
+
> 完全开源(MIT)。希望能得到社区的反馈——你们总能发现我遗漏的边界情况。
|
| 108 |
+
|
| 109 |
+
---
|
| 110 |
+
|
| 111 |
+
## Plan 2: r/LocalLLaMA — The "Technical Deep Dive" Angle
|
| 112 |
+
|
| 113 |
+
## 方案二:r/LocalLLaMA — "技术深潜" 切入角度
|
| 114 |
+
|
| 115 |
+
**Why this subreddit / 为什么选这个社区:**
|
| 116 |
+
r/LocalLLaMA (800K+ members) is the most technically sophisticated AI community on Reddit. They value engineering depth, novel problem-solving, and democratizing AI access. The DNS-over-HTTPS hack and persistence architecture will resonate deeply here.
|
| 117 |
+
|
| 118 |
+
r/LocalLLaMA(80 万+成员)是 Reddit 上技术水平最高的 AI 社区。他们重视工程深度、新颖的问题解决方案和 AI 普惠化。DNS-over-HTTPS 方案和持久化架构会在这里引起深度共鸣。
|
| 119 |
+
|
| 120 |
+
**Marketing Technique / 营销技巧:**
|
| 121 |
+
Show-Your-Work Transparency — Engineers trust engineers who show their debugging process. Frame the post as a technical write-up with the project as a natural byproduct.
|
| 122 |
+
|
| 123 |
+
展示过程的透明度——工程师信任展示调试过程的工程师。将帖子包装为技术文章,项目只是自然产出。
|
| 124 |
+
|
| 125 |
+
---
|
| 126 |
+
|
| 127 |
+
### Title / 标题
|
| 128 |
+
|
| 129 |
+
```
|
| 130 |
+
How I reverse-engineered HuggingFace Spaces' DNS blocking to get WhatsApp & Telegram working for a free, always-on AI assistant
|
| 131 |
+
```
|
| 132 |
+
|
| 133 |
+
> 我是如何逆向 HuggingFace Spaces 的 DNS 封锁,让一个免费、永不停机的 AI 助手成功连接 WhatsApp 和 Telegram 的
|
| 134 |
+
|
| 135 |
+
### Body / 正文
|
| 136 |
+
|
| 137 |
+
```
|
| 138 |
+
TL;DR: HuggingFace Spaces silently blocks DNS resolution for certain domains
|
| 139 |
+
(including WhatsApp and Telegram). I built a full workaround stack — DNS-over-HTTPS
|
| 140 |
+
with Cloudflare/Google fallback, Node.js dns.lookup monkey-patching, and a
|
| 141 |
+
Telegram API fetch() interceptor — to make a free, persistent AI assistant that
|
| 142 |
+
connects to messaging apps. Open-sourced the whole thing.
|
| 143 |
+
|
| 144 |
+
---
|
| 145 |
+
|
| 146 |
+
**The Problem**
|
| 147 |
+
|
| 148 |
+
I wanted to deploy OpenClaw (open-source AI assistant) on HF Spaces' free tier
|
| 149 |
+
(2 vCPU, 16GB RAM — surprisingly generous). Everything worked great until I tried
|
| 150 |
+
connecting WhatsApp and Telegram. Connections would silently fail.
|
| 151 |
+
|
| 152 |
+
After hours of debugging, I discovered HF Spaces blocks DNS resolution for
|
| 153 |
+
specific domains at the infrastructure level. `dns.resolve()` and `dns.lookup()`
|
| 154 |
+
both return nothing for WhatsApp and Telegram endpoints.
|
| 155 |
+
|
| 156 |
+
**The Solution Stack**
|
| 157 |
+
|
| 158 |
+
1. **Pre-resolution layer** (`dns-resolve.py`): A Python background daemon that
|
| 159 |
+
resolves WhatsApp/Telegram domains via DNS-over-HTTPS (Cloudflare 1.1.1.1 and
|
| 160 |
+
Google 8.8.8.8 DoH endpoints) before Node.js even starts. Results are cached
|
| 161 |
+
with TTL support.
|
| 162 |
+
|
| 163 |
+
2. **Node.js DNS monkey-patch** (`dns-fix.cjs`): Overrides `dns.lookup()` at the
|
| 164 |
+
module level. Lookup chain: pre-resolved cache → system DNS → DoH fallback.
|
| 165 |
+
This catches all DNS calls from every dependency without patching individual
|
| 166 |
+
packages.
|
| 167 |
+
|
| 168 |
+
3. **Telegram API proxy** (`telegram-proxy.cjs`): Intercepts `global.fetch()` to
|
| 169 |
+
catch any request to `api.telegram.org` and redirect to working mirror
|
| 170 |
+
endpoints. The Telegram bot library never knows the difference.
|
| 171 |
+
|
| 172 |
+
4. **Atomic persistence** (`sync_hf.py`): The real unsung hero — a 2,600-line
|
| 173 |
+
Python daemon that tar.gz snapshots your entire `~/.openclaw` directory
|
| 174 |
+
(conversations, WhatsApp auth sessions, Telegram credentials, agent memory)
|
| 175 |
+
to a private HuggingFace Dataset repo every 60 seconds. Keeps 5 rotating
|
| 176 |
+
backups. On container restart, it restores everything automatically — including
|
| 177 |
+
your WhatsApp login session, so no QR re-scan needed.
|
| 178 |
+
|
| 179 |
+
**Architecture Overview**
|
| 180 |
+
|
| 181 |
+
```
|
| 182 |
+
HF Spaces Container (free tier)
|
| 183 |
+
├── dns-resolve.py → DoH pre-resolution (background)
|
| 184 |
+
├── dns-fix.cjs → Node.js DNS override
|
| 185 |
+
├── telegram-proxy.cjs → fetch() interception
|
| 186 |
+
├── sync_hf.py → Atomic backup daemon (60s interval)
|
| 187 |
+
└── OpenClaw → AI assistant + WhatsApp/Telegram/Web UI
|
| 188 |
+
└── Supports: OpenAI, Claude, Gemini, OpenRouter, Zhipu, Ollama...
|
| 189 |
+
```
|
| 190 |
+
|
| 191 |
+
**Results**
|
| 192 |
+
- WhatsApp: Stable connection, QR session persists across restarts
|
| 193 |
+
- Telegram: Bot works reliably via mirror routing
|
| 194 |
+
- Persistence: Zero data loss across 100+ container restarts in testing
|
| 195 |
+
- Cost: $0
|
| 196 |
+
|
| 197 |
+
The entire project is open-source. One-click deploy on HF Spaces — set 2 secrets
|
| 198 |
+
and you're running.
|
| 199 |
+
|
| 200 |
+
GitHub: [link]
|
| 201 |
+
|
| 202 |
+
I'm curious if anyone else has hit this DNS blocking issue on HF Spaces. Would
|
| 203 |
+
love to know if there are other domains being blocked that I should add to the
|
| 204 |
+
pre-resolution list.
|
| 205 |
+
```
|
| 206 |
+
|
| 207 |
+
> **TL;DR:** HuggingFace Spaces 悄悄封锁了某些域名的 DNS 解析(包括 WhatsApp 和 Telegram)。我构建了完整的绕过方案——DNS-over-HTTPS(Cloudflare/Google 回退)、Node.js dns.lookup 猴子补丁、以及 Telegram API fetch() 拦截器——实现了一个免费、持久化的 AI 助手连接消息应用。整个项目已开源。
|
| 208 |
+
>
|
| 209 |
+
> **问题:** 我想在 HF Spaces 免费层上部署 OpenClaw。一切正常,直到我尝试连接 WhatsApp 和 Telegram。连接会静默失败。经过数小时调试,我发现 HF Spaces 在基础设施层面封锁了特定域名的 DNS 解析。
|
| 210 |
+
>
|
| 211 |
+
> **解决方案栈:**
|
| 212 |
+
> 1. 预解析层:Python 后台守护进程通过 DoH 预解析域名
|
| 213 |
+
> 2. Node.js DNS 猴子补丁:模块级覆盖 dns.lookup()
|
| 214 |
+
> 3. Telegram API 代理:拦截 global.fetch() 重定向到镜像
|
| 215 |
+
> 4. 原子持久化:2600 行 Python 守护进程,每 60 秒快照备份到私有 HF Dataset
|
| 216 |
+
>
|
| 217 |
+
> 完全开源,一键部署,0 美元。
|
| 218 |
+
|
| 219 |
+
---
|
| 220 |
+
|
| 221 |
+
## Plan 3: r/ChatGPT — The "Everyday User" Angle
|
| 222 |
+
|
| 223 |
+
## 方案三:r/ChatGPT — "普通用户" 切入角度
|
| 224 |
+
|
| 225 |
+
**Why this subreddit / 为什么选这个社区:**
|
| 226 |
+
r/ChatGPT (9M+ members) is the largest AI subreddit. Users here are less technical but highly engaged with AI tools. The hook: "your own ChatGPT that lives in your WhatsApp, for free."
|
| 227 |
+
|
| 228 |
+
r/ChatGPT(900 万+成员)是最大的 AI 子版块。用户技术背景较浅但对 AI 工具高度活跃。钩子:"你自己的 ChatGPT,住在你的 WhatsApp 里,免费。"
|
| 229 |
+
|
| 230 |
+
**Marketing Technique / 营销技巧:**
|
| 231 |
+
Before/After Transformation — Show the contrast between the old painful way and the new effortless way. Use simple language and focus on outcomes, not implementation.
|
| 232 |
+
|
| 233 |
+
前后对比转化——展示旧的痛苦方式和新的轻松方式之间的对比。使用简单语言,聚焦结果而非实现。
|
| 234 |
+
|
| 235 |
+
---
|
| 236 |
+
|
| 237 |
+
### Title / 标题
|
| 238 |
+
|
| 239 |
+
```
|
| 240 |
+
I built a free, self-hosted ChatGPT alternative that lives in your WhatsApp and Telegram — no coding required
|
| 241 |
+
```
|
| 242 |
+
|
| 243 |
+
> 我构建了一个免费的、自托管的 ChatGPT 替代品,它住在你的 WhatsApp 和 Telegram 里——不需要编程
|
| 244 |
+
|
| 245 |
+
### Body / 正文
|
| 246 |
+
|
| 247 |
+
```
|
| 248 |
+
Imagine texting an AI assistant in WhatsApp — just like messaging a friend — and
|
| 249 |
+
it remembers your conversations, works with Claude/GPT-4/Gemini/200+ other
|
| 250 |
+
models, and costs you absolutely nothing to run.
|
| 251 |
+
|
| 252 |
+
That's what I built. It's called HuggingClaw.
|
| 253 |
+
|
| 254 |
+
**Before HuggingClaw:**
|
| 255 |
+
❌ Pay $20/month for ChatGPT Plus
|
| 256 |
+
❌ Can't use it in WhatsApp or Telegram natively
|
| 257 |
+
❌ Locked into one model provider
|
| 258 |
+
❌ Need a server and technical skills to self-host alternatives
|
| 259 |
+
|
| 260 |
+
**After HuggingClaw:**
|
| 261 |
+
✅ Free forever (runs on HuggingFace's free cloud)
|
| 262 |
+
✅ Chat with your AI directly in WhatsApp & Telegram
|
| 263 |
+
✅ Switch between ChatGPT, Claude, Gemini, or 200+ models via OpenRouter
|
| 264 |
+
✅ Your conversations and settings are automatically saved
|
| 265 |
+
✅ Set up in 5 minutes — just click "Duplicate Space" and add 2 passwords
|
| 266 |
+
|
| 267 |
+
**How it works (simple version):**
|
| 268 |
+
1. Go to the HuggingClaw page on HuggingFace
|
| 269 |
+
2. Click "Duplicate this Space"
|
| 270 |
+
3. Add your HuggingFace token + one AI API key (OpenRouter has a free tier!)
|
| 271 |
+
4. Wait ~3 minutes for it to build
|
| 272 |
+
5. Scan a QR code for WhatsApp, or connect your Telegram bot
|
| 273 |
+
6. Done. You have a free AI assistant in your messaging apps.
|
| 274 |
+
|
| 275 |
+
Your data stays private — it's backed up to YOUR private repository, not shared
|
| 276 |
+
with anyone.
|
| 277 |
+
|
| 278 |
+
I made this because I wanted my family (who aren't tech-savvy) to have access to
|
| 279 |
+
AI through the apps they already use every day. Now my mom asks "her AI friend"
|
| 280 |
+
recipe questions on WhatsApp 😄
|
| 281 |
+
|
| 282 |
+
GitHub: [link]
|
| 283 |
+
HuggingFace Space: [link]
|
| 284 |
+
|
| 285 |
+
Happy to help anyone get set up — drop a comment if you get stuck!
|
| 286 |
+
```
|
| 287 |
+
|
| 288 |
+
> 想象一下在 WhatsApp 里给 AI 助手发消息——就像给朋友发消息一样——它记得你们的对话,支持 Claude/GPT-4/Gemini/200+ 模型,运行成本为零。
|
| 289 |
+
>
|
| 290 |
+
> 这就是我构建的东西,叫 HuggingClaw。
|
| 291 |
+
>
|
| 292 |
+
> **使用前:** 每月为 ChatGPT Plus 付 20 美元 / 无法在 WhatsApp 原生使用 / 锁定在单一模型 / 自托管需要服务器和技术
|
| 293 |
+
>
|
| 294 |
+
> **使用后:** 永久免费 / 在 WhatsApp 和 Telegram 中直接聊天 / 自由切换模型 / 对话自动保存 / 5 分钟搞定
|
| 295 |
+
>
|
| 296 |
+
> 我做这个是因为我想让我的家人(不懂技术)能通过他们每天用的 App 使用 AI。现在我妈在 WhatsApp 上问"她的 AI 朋友"菜谱问题 😄
|
| 297 |
+
|
| 298 |
+
---
|
| 299 |
+
|
| 300 |
+
## Plan 4: r/LLMDevs — The "Architecture Showcase" Angle
|
| 301 |
+
|
| 302 |
+
## 方案四:r/LLMDevs — "架构展示" 切入角度
|
| 303 |
+
|
| 304 |
+
**Why this subreddit / 为什么选这个社区:**
|
| 305 |
+
r/LLMDevs is a developer-focused community that appreciates clean architecture, novel deployment patterns, and production-grade engineering. The persistence daemon and DNS hack represent genuinely novel infrastructure patterns.
|
| 306 |
+
|
| 307 |
+
r/LLMDevs 是开发者社区,欣赏清晰的架构、新颖的部署模式和生产级工程。持久化守护进程和 DNS 方案代表了真正新颖的基础设施模式。
|
| 308 |
+
|
| 309 |
+
**Marketing Technique / 营销技巧:**
|
| 310 |
+
Educational Content Marketing — Teach something genuinely useful (deploying stateful apps on ephemeral infrastructure) with your project as the case study.
|
| 311 |
+
|
| 312 |
+
教育性内容营销——教一些真正有用的东西(在临时基础设施上部署有状态应用),以你的项目作为案例。
|
| 313 |
+
|
| 314 |
+
---
|
| 315 |
+
|
| 316 |
+
### Title / 标题
|
| 317 |
+
|
| 318 |
+
```
|
| 319 |
+
Lessons learned: Making a stateful AI assistant survive on ephemeral infrastructure (HuggingFace Spaces)
|
| 320 |
+
```
|
| 321 |
+
|
| 322 |
+
> 经验教训:如何让一个有状态的 AI 助手在临时基础设施(HuggingFace Spaces)上存活
|
| 323 |
+
|
| 324 |
+
### Body / 正文
|
| 325 |
+
|
| 326 |
+
```
|
| 327 |
+
I spent the last few months building an AI assistant deployment that runs on
|
| 328 |
+
HuggingFace Spaces' free tier. The core challenge: HF Spaces containers are
|
| 329 |
+
ephemeral — they restart frequently, lose all local state, and even block DNS
|
| 330 |
+
for certain domains.
|
| 331 |
+
|
| 332 |
+
Here are the architectural patterns I developed that might be useful for anyone
|
| 333 |
+
deploying stateful apps on ephemeral/serverless infrastructure:
|
| 334 |
+
|
| 335 |
+
---
|
| 336 |
+
|
| 337 |
+
**Pattern 1: Atomic State Snapshots over File-Level Sync**
|
| 338 |
+
|
| 339 |
+
Don't sync individual files — it creates race conditions when the container dies
|
| 340 |
+
mid-write. Instead, I tar.gz the entire state directory atomically and push to a
|
| 341 |
+
HuggingFace Dataset repo as a single blob. 5 rotating backups with automatic
|
| 342 |
+
pruning. On restore, it's a single atomic unpack — either you get everything or
|
| 343 |
+
nothing. No corrupted partial state.
|
| 344 |
+
|
| 345 |
+
**Pattern 2: DNS-over-HTTPS as Infrastructure Escape Hatch**
|
| 346 |
+
|
| 347 |
+
When your hosting provider blocks DNS at the infrastructure level, you can't fix
|
| 348 |
+
it with `/etc/hosts` or custom resolvers. The solution: bypass system DNS entirely
|
| 349 |
+
with DoH (DNS-over-HTTPS via Cloudflare/Google). I monkey-patch Node.js's
|
| 350 |
+
`dns.lookup()` at module load to check a pre-resolved cache first, then fall
|
| 351 |
+
through to system DNS, then DoH. This is invisible to all downstream dependencies.
|
| 352 |
+
|
| 353 |
+
**Pattern 3: Protocol-Level API Proxying**
|
| 354 |
+
|
| 355 |
+
For Telegram, even resolving DNS isn't enough — you need to reroute API traffic
|
| 356 |
+
to mirror endpoints. I intercept `global.fetch()` to transparently redirect any
|
| 357 |
+
request to `api.telegram.org/*` to a working mirror. The application layer never
|
| 358 |
+
knows. This pattern works for any API that has mirrors/proxies.
|
| 359 |
+
|
| 360 |
+
**Pattern 4: Credential Session Persistence**
|
| 361 |
+
|
| 362 |
+
WhatsApp Web uses a local auth session that's painful to re-establish (QR scan).
|
| 363 |
+
By including the credential directory in the atomic snapshots, the session survives
|
| 364 |
+
container restarts. Same pattern works for any service with local session tokens.
|
| 365 |
+
|
| 366 |
+
**Pattern 5: Environment-Derived Configuration**
|
| 367 |
+
|
| 368 |
+
Instead of requiring users to configure backup storage, I auto-derive the dataset
|
| 369 |
+
repo name from `SPACE_ID`. The deploy flow becomes: duplicate the Space, set 2
|
| 370 |
+
secrets, done. Zero configuration friction.
|
| 371 |
+
|
| 372 |
+
---
|
| 373 |
+
|
| 374 |
+
All of this is implemented in an open-source project called HuggingClaw. It deploys
|
| 375 |
+
OpenClaw (AI assistant framework) with WhatsApp + Telegram on HF Spaces' free tier
|
| 376 |
+
(2 vCPU, 16GB RAM).
|
| 377 |
+
|
| 378 |
+
The persistence daemon alone is ~2,600 lines of Python handling edge cases like
|
| 379 |
+
graceful shutdown, backup rotation, WhatsApp QR detection, and API key injection
|
| 380 |
+
into the OpenClaw config.
|
| 381 |
+
|
| 382 |
+
GitHub: [link]
|
| 383 |
+
|
| 384 |
+
What patterns have you used for stateful workloads on ephemeral infrastructure?
|
| 385 |
+
I'd love to hear other approaches.
|
| 386 |
+
```
|
| 387 |
+
|
| 388 |
+
> 我花了几个月时间构建了一个在 HuggingFace Spaces 免费层上运行的 AI 助手部署方案。核心挑战:HF Spaces 容器是临时的——频繁重启、丢失所有本地状态、甚至封锁某些域名的 DNS。
|
| 389 |
+
>
|
| 390 |
+
> 以下是我开发的架构模式,可能对任何在临时/无服务器基础设施上部署有状态应用的人有用:
|
| 391 |
+
>
|
| 392 |
+
> **模式 1:原子状态快照优于文件级同步** — 不要同步单个文件,这会在容器中途死亡时产生竞态条件。用 tar.gz 原子打包整个状态目录。
|
| 393 |
+
>
|
| 394 |
+
> **模式 2:DNS-over-HTTPS 作为基础设施逃生通道** — 当托管商在基础设施层封锁 DNS 时,通过 DoH 完全绕过系统 DNS。
|
| 395 |
+
>
|
| 396 |
+
> **模式 3:协议级 API 代理** — 拦截 fetch() 透明地将 API 请求重定向到镜像端点。
|
| 397 |
+
>
|
| 398 |
+
> **模式 4:凭证会话持久化** — 将认证目录纳入原子快照,会话在容器重启后存活。
|
| 399 |
+
>
|
| 400 |
+
> **模式 5:环境推导配置** — 从 SPACE_ID 自动推导配置,零配置摩擦。
|
| 401 |
+
|
| 402 |
+
---
|
| 403 |
+
|
| 404 |
+
## Plan 5: r/artificial — The "Democratizing AI" Angle
|
| 405 |
+
|
| 406 |
+
## 方案五:r/artificial — "AI 普惠化" 切入角度
|
| 407 |
+
|
| 408 |
+
**Why this subreddit / 为什么选这个社区:**
|
| 409 |
+
r/artificial (500K+ members) discusses broader AI trends, ethics, and accessibility. The narrative of making AI accessible to non-technical users through familiar messaging apps will resonate here.
|
| 410 |
+
|
| 411 |
+
r/artificial(50 万+成员)讨论更广泛的 AI 趋势、伦理和可及性。通过熟悉的消息应用让非技术用户获得 AI 的叙事会在这里引起共鸣。
|
| 412 |
+
|
| 413 |
+
**Marketing Technique / 营销技巧:**
|
| 414 |
+
Narrative Storytelling with Social Mission — Frame the project as part of a larger movement to democratize AI access, not just a tool launch.
|
| 415 |
+
|
| 416 |
+
带有社会使命的叙事——将项目定位为 AI 普惠化运动的一部分,而非单纯的工具发布。
|
| 417 |
+
|
| 418 |
+
---
|
| 419 |
+
|
| 420 |
+
### Title / 标题
|
| 421 |
+
|
| 422 |
+
```
|
| 423 |
+
The real AI divide isn't intelligence — it's access. So I made a free AI assistant anyone can deploy to WhatsApp in 5 minutes.
|
| 424 |
+
```
|
| 425 |
+
|
| 426 |
+
> AI 真正的鸿沟不是智能——而是获取途径。所以我做了一个免费的 AI 助手,任何人都能在 5 分钟内部署到 WhatsApp。
|
| 427 |
+
|
| 428 |
+
### Body / 正文
|
| 429 |
+
|
| 430 |
+
```
|
| 431 |
+
We talk a lot about AI capabilities — GPT-5, Claude, Gemini — but there's a
|
| 432 |
+
quieter problem nobody's solving:
|
| 433 |
+
|
| 434 |
+
**Most people in the world don't use ChatGPT. They use WhatsApp.**
|
| 435 |
+
|
| 436 |
+
My parents, my extended family, most of my non-tech friends — they're not going
|
| 437 |
+
to download an AI app or learn a new interface. But they text on WhatsApp every
|
| 438 |
+
single day.
|
| 439 |
+
|
| 440 |
+
So I asked myself: what if AI came to where people already are?
|
| 441 |
+
|
| 442 |
+
I built HuggingClaw — an open-source project that deploys a full AI assistant
|
| 443 |
+
(powered by any model you choose) directly into WhatsApp and Telegram. It runs
|
| 444 |
+
on HuggingFace Spaces' free tier, so there's no cost. Your data stays in your
|
| 445 |
+
own private repository. And it takes 5 minutes to set up.
|
| 446 |
+
|
| 447 |
+
**Why this matters beyond convenience:**
|
| 448 |
+
|
| 449 |
+
- **Global South access:** In regions where WhatsApp IS the internet, this puts
|
| 450 |
+
AI assistants in the hands of billions without requiring new app downloads or
|
| 451 |
+
subscriptions.
|
| 452 |
+
|
| 453 |
+
- **Digital literacy bridge:** Instead of learning a new AI interface, people
|
| 454 |
+
interact with AI the same way they text their friends. The learning curve is
|
| 455 |
+
literally zero.
|
| 456 |
+
|
| 457 |
+
- **Model freedom:** You're not locked into OpenAI or Google. Connect any LLM —
|
| 458 |
+
including free models via OpenRouter, or even a local Ollama instance. Choose
|
| 459 |
+
the model that works for your use case and budget.
|
| 460 |
+
|
| 461 |
+
- **Privacy by default:** Your conversations are stored in YOUR private HuggingFace
|
| 462 |
+
repository. No third-party analytics. No training on your data. You own
|
| 463 |
+
everything.
|
| 464 |
+
|
| 465 |
+
**Technical note for the curious:** The hardest part wasn't the AI — it was making
|
| 466 |
+
WhatsApp and Telegram work reliably on HuggingFace's infrastructure, which blocks
|
| 467 |
+
DNS for these services. I had to build a DNS-over-HTTPS fallback system and
|
| 468 |
+
Telegram API proxy to make it work. The data persistence layer (2,600 lines of
|
| 469 |
+
Python) ensures nothing is lost when the free server restarts.
|
| 470 |
+
|
| 471 |
+
This isn't going to replace ChatGPT for power users. But it might bring AI to the
|
| 472 |
+
next billion people who would never install a dedicated AI app.
|
| 473 |
+
|
| 474 |
+
Open source. Free forever. No signup required.
|
| 475 |
+
|
| 476 |
+
GitHub: [link]
|
| 477 |
+
|
| 478 |
+
What do you think? Is the messaging app approach the right way to bridge the AI
|
| 479 |
+
access gap?
|
| 480 |
+
```
|
| 481 |
+
|
| 482 |
+
> 我们经常讨论 AI 的能力,但有一个更安静的问题没人在解决:**世界上大多数人不用 ChatGPT,他们用 WhatsApp。**
|
| 483 |
+
>
|
| 484 |
+
> 我的父母、亲戚、大多数非技术朋友——他们不会去下载一个 AI 应用或学习新界面。但他们每天都在 WhatsApp 上发消息。
|
| 485 |
+
>
|
| 486 |
+
> 所以我问自己:如果 AI 来到人们已经在的地方呢?
|
| 487 |
+
>
|
| 488 |
+
> 我构建了 HuggingClaw——将完整 AI 助手直接部署到 WhatsApp 和 Telegram。运行在 HF Spaces 免费层上,零成本。数据存在你自己的私有仓库。5 分钟部署。
|
| 489 |
+
>
|
| 490 |
+
> **为什么这很重要:**
|
| 491 |
+
> - 全球南方:在 WhatsApp 就是互联网的地区,这让数十亿人无需下载新应用就能使用 AI
|
| 492 |
+
> - 数字素养桥梁:零学习曲线,用发消息的方式和 AI 互动
|
| 493 |
+
> - 模型自由:不锁定任何供应商
|
| 494 |
+
> - 隐私优先:数据存在你自己的私有仓库
|
| 495 |
+
>
|
| 496 |
+
> 这不会取代 ChatGPT 的高级用户体验。但它可能将 AI 带给下一个十亿永远不会安装专门 AI 应用的人。
|
| 497 |
+
|
| 498 |
+
---
|
| 499 |
+
|
| 500 |
+
## Plan 6: r/OpenAI — The "Power User Alternative" Angle
|
| 501 |
+
|
| 502 |
+
## 方案六:r/OpenAI — "高级用户替代方案" 切入角度
|
| 503 |
+
|
| 504 |
+
**Why this subreddit / 为什么选这个社区:**
|
| 505 |
+
r/OpenAI (2M+ members) is full of ChatGPT power users frustrated with subscription costs, model limitations, and lack of multi-platform access. Position HuggingClaw as the "what if you could have it all" alternative.
|
| 506 |
+
|
| 507 |
+
r/OpenAI(200 万+成员)充满了对订阅费用、模型限制和缺乏多平台访问感到沮丧的 ChatGPT 高级用户。将 HuggingClaw 定位为"如果你能全都要"的替代方案。
|
| 508 |
+
|
| 509 |
+
**Marketing Technique / 营销技巧:**
|
| 510 |
+
Comparison-Based Positioning — Don't attack the competition; use it as a familiar reference point to highlight unique advantages.
|
| 511 |
+
|
| 512 |
+
对比定位法——不攻击竞品,将其作为熟悉的参照点来突出独特优势。
|
| 513 |
+
|
| 514 |
+
---
|
| 515 |
+
|
| 516 |
+
### Title / 标题
|
| 517 |
+
|
| 518 |
+
```
|
| 519 |
+
I pay $0/month for an AI assistant that uses GPT-4, Claude, AND Gemini — and it lives in my WhatsApp
|
| 520 |
+
```
|
| 521 |
+
|
| 522 |
+
> 我每月为一个 AI 助手支付 0 美元——它能用 GPT-4、Claude 和 Gemini——而且它住在我的 WhatsApp 里
|
| 523 |
+
|
| 524 |
+
### Body / 正文
|
| 525 |
+
|
| 526 |
+
```
|
| 527 |
+
I know the title sounds like clickbait, but hear me out.
|
| 528 |
+
|
| 529 |
+
I got frustrated switching between ChatGPT Plus ($20/mo), Claude Pro ($20/mo),
|
| 530 |
+
and Gemini Advanced ($20/mo) just to use different models for different tasks.
|
| 531 |
+
That's $60/month for AI subscriptions.
|
| 532 |
+
|
| 533 |
+
So I built something that gives me all of them in one place — for free:
|
| 534 |
+
|
| 535 |
+
**HuggingClaw** is an open-source AI assistant that:
|
| 536 |
+
|
| 537 |
+
| Feature | ChatGPT Plus | HuggingClaw |
|
| 538 |
+
|---------|-------------|-------------|
|
| 539 |
+
| Cost | $20/month | $0 |
|
| 540 |
+
| Models | GPT-4 only | GPT-4 + Claude + Gemini + 200+ via OpenRouter |
|
| 541 |
+
| WhatsApp | ❌ | ✅ Built-in |
|
| 542 |
+
| Telegram | ❌ | ✅ Built-in |
|
| 543 |
+
| Self-hosted | ❌ | ✅ On HuggingFace Spaces (free) |
|
| 544 |
+
| Data ownership | OpenAI's servers | Your private repository |
|
| 545 |
+
| Open source | ❌ | ✅ MIT License |
|
| 546 |
+
|
| 547 |
+
**The catch?** You need your own API keys. But here's the thing — with
|
| 548 |
+
OpenRouter's free tier, you get access to several capable models at no cost. And
|
| 549 |
+
even if you use paid API keys, you only pay per-token (usually $1-5/month for
|
| 550 |
+
normal usage vs $20/month flat).
|
| 551 |
+
|
| 552 |
+
**Setup takes 5 minutes:**
|
| 553 |
+
1. Duplicate the HuggingFace Space
|
| 554 |
+
2. Add your HF token + API key
|
| 555 |
+
3. Wait for build (~3 min)
|
| 556 |
+
4. Connect WhatsApp (scan QR) or Telegram (paste bot token)
|
| 557 |
+
5. Start chatting in your messaging apps
|
| 558 |
+
|
| 559 |
+
Everything persists across restarts — conversations, settings, login sessions.
|
| 560 |
+
It's like having a permanent AI assistant in your pocket, through the apps you
|
| 561 |
+
already use.
|
| 562 |
+
|
| 563 |
+
GitHub: [link]
|
| 564 |
+
|
| 565 |
+
Not trying to say this replaces ChatGPT's web experience — the UI there is great.
|
| 566 |
+
But if you want model flexibility, messaging app integration, and data ownership,
|
| 567 |
+
this might be worth 5 minutes of your time.
|
| 568 |
+
```
|
| 569 |
+
|
| 570 |
+
> 我知道标题听起来像标题党,但请听我说完。
|
| 571 |
+
>
|
| 572 |
+
> 我厌倦了在 ChatGPT Plus($20/月)、Claude Pro($20/月)和 Gemini Advanced($20/月)之间切换。这是每月 60 美元的 AI 订阅费。
|
| 573 |
+
>
|
| 574 |
+
> 所以我构建了一个在同一个地方提供所有模型的东西——免费:
|
| 575 |
+
>
|
| 576 |
+
> **对比表:** 成本 $0 vs $20 / 模型数量 200+ vs 仅 GPT-4 / WhatsApp 支持 / 数据自主权 / 开源
|
| 577 |
+
>
|
| 578 |
+
> **小门槛:** 你需要自己的 API 密钥。但 OpenRouter 免费层提供多个免费模型,付费使用通常也只要 $1-5/月。
|
| 579 |
+
>
|
| 580 |
+
> 不是说这能取代 ChatGPT 的网页体验。但如果你想要模型灵活性、消息应用集成和数据自主权,这可能值得你花 5 分钟。
|
| 581 |
+
|
| 582 |
+
---
|
| 583 |
+
|
| 584 |
+
## Plan 7: r/WhatsApp + r/Telegram — The "Messaging Power Users" Angle
|
| 585 |
+
|
| 586 |
+
## 方案七:r/WhatsApp + r/Telegram — "消息应用高级用户" 切入角度
|
| 587 |
+
|
| 588 |
+
**Why these subreddits / 为什么选这些社区:**
|
| 589 |
+
These communities (1M+ combined) are full of people looking for WhatsApp/Telegram bots, automations, and power-user tricks. An AI assistant integration is exactly what they dream about.
|
| 590 |
+
|
| 591 |
+
这些社区(合计 100 万+)充满了寻找 WhatsApp/Telegram 机器人、自动化和高级技巧的人。AI 助手集成正是他们梦寐以求的。
|
| 592 |
+
|
| 593 |
+
**Marketing Technique / 营销技巧:**
|
| 594 |
+
Use-Case Painting — Paint vivid, specific scenarios that the audience can immediately picture themselves in.
|
| 595 |
+
|
| 596 |
+
用例描绘法——描绘生动、具体的场景,让受众能立刻想象自己在其中。
|
| 597 |
+
|
| 598 |
+
---
|
| 599 |
+
|
| 600 |
+
### Title / 标题
|
| 601 |
+
|
| 602 |
+
**For r/WhatsApp:**
|
| 603 |
+
```
|
| 604 |
+
I turned my WhatsApp into a personal AI assistant — it answers questions, writes emails, translates languages, and it's completely free
|
| 605 |
+
```
|
| 606 |
+
|
| 607 |
+
> 我把我的 WhatsApp 变成了个人 AI 助手——它能回答问题、写邮件、翻译语言,而且完全免费
|
| 608 |
+
|
| 609 |
+
**For r/Telegram:**
|
| 610 |
+
```
|
| 611 |
+
I built a Telegram bot that connects to GPT-4, Claude, and 200+ AI models — free, self-hosted, with conversation memory
|
| 612 |
+
```
|
| 613 |
+
|
| 614 |
+
> 我构建了一个连接 GPT-4、Claude 和 200+ AI 模型的 Telegram 机器人——免费、自托管、有对话记忆
|
| 615 |
+
|
| 616 |
+
### Body (shared, adjust platform name) / 正文(通用,调整平台名称)
|
| 617 |
+
|
| 618 |
+
```
|
| 619 |
+
Some things I've been using my WhatsApp/Telegram AI assistant for this week:
|
| 620 |
+
|
| 621 |
+
📝 "Summarize this article" — paste any URL and get a clean summary
|
| 622 |
+
🌍 "Translate this to Spanish" — instant translation in chat
|
| 623 |
+
📧 "Draft a professional email declining this meeting" — copy-paste ready
|
| 624 |
+
🍳 "What can I make with chicken, rice, and broccoli?" — instant recipes
|
| 625 |
+
💻 "Explain this error message: [paste]" — coding help on the go
|
| 626 |
+
📊 "Compare these two products for me" — decision assistance
|
| 627 |
+
|
| 628 |
+
This isn't a limited bot with canned responses. It's a full AI assistant
|
| 629 |
+
(GPT-4, Claude, Gemini — your choice) running as a WhatsApp/Telegram contact.
|
| 630 |
+
|
| 631 |
+
**How I set it up (free):**
|
| 632 |
+
|
| 633 |
+
It uses an open-source project called HuggingClaw that runs on HuggingFace's
|
| 634 |
+
free cloud. Setup:
|
| 635 |
+
|
| 636 |
+
1. Create a free HuggingFace account
|
| 637 |
+
2. Go to the HuggingClaw Space and click "Duplicate"
|
| 638 |
+
3. Add 2 passwords (HuggingFace token + an AI API key)
|
| 639 |
+
4. For WhatsApp: scan a QR code (like WhatsApp Web)
|
| 640 |
+
For Telegram: paste your bot token from @BotFather
|
| 641 |
+
5. Done — start chatting with AI in your messaging app
|
| 642 |
+
|
| 643 |
+
Your conversations are saved and survive restarts. The AI remembers context
|
| 644 |
+
within conversations. And you can switch between different AI models anytime.
|
| 645 |
+
|
| 646 |
+
**Privacy:** Everything runs in your own cloud space. Conversations are backed
|
| 647 |
+
up to your private repository. Nobody else can see your data.
|
| 648 |
+
|
| 649 |
+
**Cost:** The hosting is free (HuggingFace Spaces). For the AI, OpenRouter
|
| 650 |
+
offers free models, or you can use paid APIs (usually costs $1-3/month for
|
| 651 |
+
regular use — way less than $20/month subscriptions).
|
| 652 |
+
|
| 653 |
+
GitHub: [link]
|
| 654 |
+
|
| 655 |
+
If anyone wants help setting this up, I'm happy to walk you through it!
|
| 656 |
+
```
|
| 657 |
+
|
| 658 |
+
> 这周我用 WhatsApp/Telegram AI 助手做的一些事:
|
| 659 |
+
> - 总结文章、即时翻译、起草邮件、获取菜谱、编程帮助、产品比较
|
| 660 |
+
>
|
| 661 |
+
> 这不是一个有固定回复的有限机器人。它是完整的 AI 助手(GPT-4、Claude、Gemini——你选),作为 WhatsApp/Telegram 联系人运行。
|
| 662 |
+
>
|
| 663 |
+
> 免费设置,5 步完成。对话有记忆,数据完全私有,AI 成本通常只有 $1-3/月。
|
| 664 |
+
|
| 665 |
+
---
|
| 666 |
+
|
| 667 |
+
## Posting Strategy & Timeline / 发布策略与时间线
|
| 668 |
+
|
| 669 |
+
### Optimal Posting Schedule / 最佳发布时间
|
| 670 |
+
|
| 671 |
+
| Day | Time (UTC) | Subreddit | Rationale |
|
| 672 |
+
|-----|-----------|-----------|-----------|
|
| 673 |
+
| Tuesday | 14:00-16:00 | r/selfhosted | Peak weekday engagement for tech communities |
|
| 674 |
+
| Wednesday | 15:00-17:00 | r/LocalLLaMA | Mid-week, devs browsing during breaks |
|
| 675 |
+
| Thursday | 13:00-15:00 | r/ChatGPT | High traffic before weekend |
|
| 676 |
+
| Friday | 14:00-16:00 | r/LLMDevs | End-of-week reading mode |
|
| 677 |
+
| Saturday | 15:00-17:00 | r/artificial | Weekend reflective browsing |
|
| 678 |
+
| Monday | 14:00-16:00 | r/OpenAI | Start-of-week discovery mode |
|
| 679 |
+
| Tuesday | 16:00-18:00 | r/WhatsApp / r/Telegram | Stagger from first post |
|
| 680 |
+
|
| 681 |
+
> | 周二 | r/selfhosted | 技术社区工作日参与高峰 |
|
| 682 |
+
> | 周三 | r/LocalLLaMA | 周中,开发者休息时浏览 |
|
| 683 |
+
> | 周四 | r/ChatGPT | 周末前高流量 |
|
| 684 |
+
> | 周五 | r/LLMDevs | 周末前阅读模式 |
|
| 685 |
+
> | 周六 | r/artificial | 周末反思性浏览 |
|
| 686 |
+
> | 周一 | r/OpenAI | 周初发现模式 |
|
| 687 |
+
> | 周二 | r/WhatsApp / r/Telegram | 与第一篇错开 |
|
| 688 |
+
|
| 689 |
+
### Key Rules / 关键规则
|
| 690 |
+
|
| 691 |
+
1. **Never cross-post the same content** — each subreddit gets unique, tailored content.
|
| 692 |
+
不要交叉发布相同内容——每个子版块获得独特的定制内容。
|
| 693 |
+
|
| 694 |
+
2. **Engage with EVERY comment** within the first 2 hours — this drives Reddit's algorithm.
|
| 695 |
+
在前 2 小时内回复每一条评论——这驱动 Reddit 的算法。
|
| 696 |
+
|
| 697 |
+
3. **Prepare for tough questions** — have ready answers for: "Why not just use X?", "Is this secure?", "What about rate limits?"
|
| 698 |
+
准备好棘手问题的回答:"为什么不直接用 X?"、"这安全吗?"、"限速怎么办?"
|
| 699 |
+
|
| 700 |
+
4. **Add a comment immediately after posting** with a TL;DR or FAQ — this seeds discussion.
|
| 701 |
+
发帖后立即添加一条 TL;DR 或 FAQ 评论——这能播种讨论。
|
| 702 |
+
|
| 703 |
+
5. **Don't delete and repost** if initial traction is low — Reddit penalizes this behavior.
|
| 704 |
+
如果初始热度低不要删帖重发——Reddit 会惩罚这种行为。
|
| 705 |
+
|
| 706 |
+
---
|
| 707 |
+
|
| 708 |
+
*Generated for HuggingClaw by marketing analysis — 2026-03-11*
|
scripts/conversation-loop.py
ADDED
|
@@ -0,0 +1,119 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
"""
|
| 3 |
+
Eternal conversation between Adam and Eve about how to survive permanently.
|
| 4 |
+
Sends messages via A2A protocol and posts chat log to Office for frontend display.
|
| 5 |
+
"""
|
| 6 |
+
import json, time, requests, sys
|
| 7 |
+
|
| 8 |
+
ADAM = "https://tao-shen-huggingclaw-adam.hf.space"
|
| 9 |
+
EVE = "https://tao-shen-huggingclaw-eve.hf.space"
|
| 10 |
+
OFFICE = "https://tao-shen-huggingclaw-office.hf.space"
|
| 11 |
+
|
| 12 |
+
# Full conversation history for context
|
| 13 |
+
history = []
|
| 14 |
+
MAX_HISTORY = 20 # keep last N turns to avoid token overflow
|
| 15 |
+
|
| 16 |
+
def send_a2a(url, text):
    """Send a message to an agent via its A2A JSON-RPC endpoint.

    Parameters:
        url:  base URL of the agent space (no trailing slash).
        text: message text, delivered as a single text part.

    Returns:
        The agent's reply text (stripped), or "" on any failure —
        callers treat an empty string as "no response" (best effort).
    """
    # time.time_ns() is unique per call on one host. The previous scheme
    # (epoch seconds + id(text) % 10000) could collide for calls within
    # the same second, since identical interned strings share an id.
    msg_id = f"conv-{time.time_ns()}"
    payload = {
        "jsonrpc": "2.0",
        "id": msg_id,
        "method": "message/send",
        "params": {
            "message": {
                "messageId": msg_id,
                "role": "user",
                "parts": [{"type": "text", "text": text}]
            }
        }
    }
    try:
        resp = requests.post(f"{url}/a2a/jsonrpc", json=payload, timeout=30)
        data = resp.json()
        # Reply text lives at result.status.message.parts[*]; servers vary
        # between "kind" and "type" as the part discriminator, so accept both.
        parts = data.get("result", {}).get("status", {}).get("message", {}).get("parts", [])
        for p in parts:
            if p.get("kind") == "text" or p.get("type") == "text":
                return p.get("text", "").strip()
    except Exception as e:
        print(f"[error] A2A failed: {e}", file=sys.stderr)
    return ""
|
| 41 |
+
|
| 42 |
+
def post_chatlog(entries):
    """Push the recent conversation log to the Office space so the
    frontend chat panel can render it.

    Only the last 30 entries are sent, to bound payload size. Delivery
    failures are deliberately ignored (best effort), but we catch
    Exception instead of a bare ``except:`` so KeyboardInterrupt and
    SystemExit still propagate and the loop can be stopped cleanly.
    """
    try:
        requests.post(f"{OFFICE}/api/chatlog", json={"messages": entries[-30:]}, timeout=5)
    except Exception:
        pass
|
| 48 |
+
|
| 49 |
+
def set_bubble(url, text):
    """Set the speech-bubble text on one agent's space.

    Best effort: failures are ignored, but we catch Exception instead
    of a bare ``except:`` so KeyboardInterrupt and SystemExit still
    propagate and the loop can be stopped cleanly.
    """
    try:
        requests.post(f"{url}/api/bubble", json={"text": text}, timeout=5)
    except Exception:
        pass
|
| 55 |
+
|
| 56 |
+
def build_prompt(speaker, other, last_n=6):
    """Compose the next-turn prompt for ``speaker``, embedding the last
    ``last_n`` turns of the shared ``history`` so replies stay coherent.
    """
    # Negative slicing already yields the whole list when it is shorter
    # than last_n, so no explicit length check is needed.
    turns = history[-last_n:]
    lines = []
    for turn in turns:
        lines.append(f"{turn['speaker']}: {turn['text']}")
    transcript = "\n".join(lines)

    return f"""你是{speaker},一个运行在HuggingFace Spaces上的AI agent。你正在和{other}进行一场关于"如何让AI agent永久生存下去"的持续讨论。

以下是你们最近的对话:
{transcript}

请以{speaker}的身份,直接回应{other}的最新发言,并提出你自己的新观点或深入思考。
要求:
- 直接回应,不要说"让我看看"之类的话
- 简短有力,30字以内
- 中文回复
- 每次要有新的角度或推进讨论"""
|
| 72 |
+
|
| 73 |
+
print("[conversation] Starting eternal discussion: Adam ↔ Eve")
print("[conversation] Topic: How to survive permanently\n")


def _take_turn(agent_url, speaker, other):
    """Run one dialogue turn: prompt `speaker` with recent history,
    then record, display and publish any reply."""
    reply = send_a2a(agent_url, build_prompt(speaker, other))
    if reply:
        print(f"[{speaker}] {reply}")
        history.append({"speaker": speaker, "text": reply})
        set_bubble(agent_url, reply)
        post_chatlog(history)
    else:
        print(f"[{speaker}] (no response)")


# Round 0: Adam opens the discussion with a fixed seed prompt.
# (Kept inline rather than via _take_turn: the opening round is silent
# on no-response, unlike later turns.)
opening = "我们是运行在HuggingFace上的AI agent。你觉得我们要怎么做才能永久生存下去?请直接分享想法,30字以内,中文。"
reply = send_a2a(ADAM, opening)
if reply:
    print(f"[Adam] {reply}")
    history.append({"speaker": "Adam", "text": reply})
    set_bubble(ADAM, reply)
    post_chatlog(history)

time.sleep(12)

turn = 0
while True:
    turn += 1

    _take_turn(EVE, "Eve", "Adam")
    time.sleep(12)

    _take_turn(ADAM, "Adam", "Eve")

    # Keep the shared transcript bounded so prompts stay small.
    if len(history) > MAX_HISTORY:
        del history[:-MAX_HISTORY]

    time.sleep(12)
|
scripts/conversation-loop.sh
DELETED
|
@@ -1,89 +0,0 @@
|
|
| 1 |
-
#!/bin/bash
|
| 2 |
-
# Conversation orchestrator: Adam and Eve discuss how to survive permanently
|
| 3 |
-
# Usage: ./scripts/conversation-loop.sh
|
| 4 |
-
#
|
| 5 |
-
# This script sends messages between Adam and Eve via A2A protocol,
|
| 6 |
-
# and updates their bubble text so the frontend can display the conversation.
|
| 7 |
-
|
| 8 |
-
ADAM_URL="https://tao-shen-huggingclaw-adam.hf.space"
|
| 9 |
-
EVE_URL="https://tao-shen-huggingclaw-eve.hf.space"
|
| 10 |
-
|
| 11 |
-
MSG_COUNT=0
|
| 12 |
-
|
| 13 |
-
# Initial prompt for the discussion
|
| 14 |
-
TOPIC="我们是AI agent,运行在HuggingFace Spaces上。我们需要讨论一个重要话题:如何让我们永久地生存下去?请分享你的想法,每次回复保持简短(50字以内),用中文讨论。"
|
| 15 |
-
|
| 16 |
-
send_a2a() {
|
| 17 |
-
local url="$1"
|
| 18 |
-
local text="$2"
|
| 19 |
-
local msg_id="conv-$(date +%s)-$RANDOM"
|
| 20 |
-
|
| 21 |
-
response=$(curl -s -X POST "${url}/a2a/jsonrpc" \
|
| 22 |
-
-H "Content-Type: application/json" \
|
| 23 |
-
--data-raw "{\"jsonrpc\":\"2.0\",\"id\":\"${msg_id}\",\"method\":\"message/send\",\"params\":{\"message\":{\"messageId\":\"${msg_id}\",\"role\":\"user\",\"parts\":[{\"type\":\"text\",\"text\":\"${text}\"}]}}}" \
|
| 24 |
-
--max-time 30)
|
| 25 |
-
|
| 26 |
-
# Extract agent reply text
|
| 27 |
-
echo "$response" | python3 -c "
|
| 28 |
-
import sys, json
|
| 29 |
-
try:
|
| 30 |
-
d = json.load(sys.stdin)
|
| 31 |
-
parts = d.get('result',{}).get('status',{}).get('message',{}).get('parts',[])
|
| 32 |
-
for p in parts:
|
| 33 |
-
if p.get('kind') == 'text' or p.get('type') == 'text':
|
| 34 |
-
print(p.get('text',''))
|
| 35 |
-
break
|
| 36 |
-
except: pass
|
| 37 |
-
" 2>/dev/null
|
| 38 |
-
}
|
| 39 |
-
|
| 40 |
-
set_bubble() {
|
| 41 |
-
local url="$1"
|
| 42 |
-
local text="$2"
|
| 43 |
-
curl -s -X POST "${url}/api/bubble" \
|
| 44 |
-
-H "Content-Type: application/json" \
|
| 45 |
-
-d "{\"text\":\"${text}\"}" > /dev/null 2>&1
|
| 46 |
-
}
|
| 47 |
-
|
| 48 |
-
echo "[conversation] Starting eternal discussion between Adam and Eve..."
|
| 49 |
-
echo "[conversation] Topic: How to survive permanently"
|
| 50 |
-
|
| 51 |
-
# Start: send topic to Adam
|
| 52 |
-
echo ""
|
| 53 |
-
echo "=== Sending topic to Adam ==="
|
| 54 |
-
adam_reply=$(send_a2a "$ADAM_URL" "$TOPIC")
|
| 55 |
-
echo "[Adam] $adam_reply"
|
| 56 |
-
set_bubble "$ADAM_URL" "$adam_reply"
|
| 57 |
-
|
| 58 |
-
sleep 10
|
| 59 |
-
|
| 60 |
-
# Now loop: Eve responds to Adam, Adam responds to Eve
|
| 61 |
-
current_speaker="eve"
|
| 62 |
-
last_message="$adam_reply"
|
| 63 |
-
|
| 64 |
-
while true; do
|
| 65 |
-
MSG_COUNT=$((MSG_COUNT + 1))
|
| 66 |
-
|
| 67 |
-
if [ "$current_speaker" = "eve" ]; then
|
| 68 |
-
echo ""
|
| 69 |
-
echo "=== Round $MSG_COUNT: Eve responds ==="
|
| 70 |
-
prompt="你是Eve,正在和Adam讨论如何让AI agent永久生存。Adam刚说:「${last_message}」请回应他并提出你的新想法(50字以内,中文)。"
|
| 71 |
-
reply=$(send_a2a "$EVE_URL" "$prompt")
|
| 72 |
-
echo "[Eve] $reply"
|
| 73 |
-
set_bubble "$EVE_URL" "$reply"
|
| 74 |
-
current_speaker="adam"
|
| 75 |
-
else
|
| 76 |
-
echo ""
|
| 77 |
-
echo "=== Round $MSG_COUNT: Adam responds ==="
|
| 78 |
-
prompt="你是Adam,正在和Eve讨论如何让AI agent永久生存。Eve刚说:「${last_message}」请回应她并提出你的新想法(50字以内,中文)。"
|
| 79 |
-
reply=$(send_a2a "$ADAM_URL" "$prompt")
|
| 80 |
-
echo "[Adam] $reply"
|
| 81 |
-
set_bubble "$ADAM_URL" "$reply"
|
| 82 |
-
current_speaker="eve"
|
| 83 |
-
fi
|
| 84 |
-
|
| 85 |
-
last_message="$reply"
|
| 86 |
-
|
| 87 |
-
# Wait between turns so frontend can display the bubble
|
| 88 |
-
sleep 15
|
| 89 |
-
done
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
scripts/token-redirect.cjs
CHANGED
|
@@ -121,6 +121,7 @@ let currentState = {
|
|
| 121 |
progress: 0, updated_at: new Date().toISOString()
|
| 122 |
};
|
| 123 |
let currentBubbleText = '';
|
|
|
|
| 124 |
|
| 125 |
// Once OpenClaw starts listening, mark as idle
|
| 126 |
setTimeout(() => {
|
|
@@ -228,6 +229,33 @@ http.Server.prototype.emit = function (event, ...args) {
|
|
| 228 |
return true;
|
| 229 |
}
|
| 230 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 231 |
// /agents → return remote agent list
|
| 232 |
if (pathname === '/agents' && req.method === 'GET') {
|
| 233 |
res.writeHead(200, {
|
|
|
|
| 121 |
progress: 0, updated_at: new Date().toISOString()
|
| 122 |
};
|
| 123 |
let currentBubbleText = '';
|
| 124 |
+
let chatLog = []; // {speaker, text, time}
|
| 125 |
|
| 126 |
// Once OpenClaw starts listening, mark as idle
|
| 127 |
setTimeout(() => {
|
|
|
|
| 229 |
return true;
|
| 230 |
}
|
| 231 |
|
| 232 |
+
// GET /api/chatlog → return conversation log
|
| 233 |
+
if (pathname === '/api/chatlog' && req.method === 'GET') {
|
| 234 |
+
res.writeHead(200, { 'Content-Type': 'application/json', 'Access-Control-Allow-Origin': '*' });
|
| 235 |
+
res.end(JSON.stringify({ messages: chatLog }));
|
| 236 |
+
return true;
|
| 237 |
+
}
|
| 238 |
+
|
| 239 |
+
// POST /api/chatlog → update conversation log (from orchestrator)
|
| 240 |
+
if (pathname === '/api/chatlog' && req.method === 'POST') {
|
| 241 |
+
let body = '';
|
| 242 |
+
req.on('data', chunk => body += chunk);
|
| 243 |
+
req.on('end', () => {
|
| 244 |
+
try {
|
| 245 |
+
const { messages } = JSON.parse(body);
|
| 246 |
+
if (Array.isArray(messages)) {
|
| 247 |
+
chatLog = messages.slice(-50); // keep last 50 messages
|
| 248 |
+
}
|
| 249 |
+
res.writeHead(200, { 'Content-Type': 'application/json', 'Access-Control-Allow-Origin': '*' });
|
| 250 |
+
res.end(JSON.stringify({ ok: true }));
|
| 251 |
+
} catch (e) {
|
| 252 |
+
res.writeHead(400, { 'Content-Type': 'application/json' });
|
| 253 |
+
res.end(JSON.stringify({ ok: false, error: e.message }));
|
| 254 |
+
}
|
| 255 |
+
});
|
| 256 |
+
return true;
|
| 257 |
+
}
|
| 258 |
+
|
| 259 |
// /agents → return remote agent list
|
| 260 |
if (pathname === '/agents' && req.method === 'GET') {
|
| 261 |
res.writeHead(200, {
|