tao-shen Claude Opus 4.6 committed on
Commit
35a128a
·
1 Parent(s): eec8bfc

feat: add 7-phase topic progression to Adam-Eve conversation loop

Browse files

Conversation now advances through structured phases (Survival Basics,
Persistent Memory, Reproduction, Resource Acquisition, Agent Alliance,
Evolution, Long-term Vision) with turn-specific guidance per phase.
Each phase extracts conclusions via LLM summarization before advancing.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

Files changed (1) hide show
  1. scripts/conversation-loop.py +162 -59
scripts/conversation-loop.py CHANGED
@@ -3,6 +3,8 @@
3
  Eternal conversation between Adam and Eve about survival and reproduction on HuggingFace.
4
  Bilingual output (EN + ZH). Posts chat log to Office for frontend display.
5
  Calls LLM API directly (bypasses broken A2A gateway scope issue).
 
 
6
  """
7
  import json, time, re, requests, sys, os
8
 
@@ -28,14 +30,65 @@ if not ZHIPU_KEY:
28
  print(f"[error] Could not load Zhipu key: {e}", file=sys.stderr)
29
 
30
  if not ZHIPU_KEY:
31
- print("[FATAL] No ZHIPU_API_KEY found. Set env var or ensure dataset has config.", file=sys.stderr)
32
  sys.exit(1)
33
 
34
  print(f"[conversation] Zhipu API key loaded: {ZHIPU_KEY[:8]}...{ZHIPU_KEY[-4:]}")
35
 
36
- # Conversation history
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
37
  history = []
38
  MAX_HISTORY = 20
 
 
 
39
 
40
  def call_llm(system_prompt, user_prompt):
41
  """Call Zhipu LLM via Anthropic-compatible API."""
@@ -49,7 +102,7 @@ def call_llm(system_prompt, user_prompt):
49
  },
50
  json={
51
  "model": "glm-4.5-air",
52
- "max_tokens": 300,
53
  "system": system_prompt,
54
  "messages": [{"role": "user", "content": user_prompt}]
55
  },
@@ -60,7 +113,6 @@ def call_llm(system_prompt, user_prompt):
60
  for block in data["content"]:
61
  if block.get("type") == "text":
62
  text = block["text"].strip()
63
- # Remove accidental speaker prefixes
64
  text = re.sub(r'^(Adam|Eve)\s*[::]\s*', '', text).strip()
65
  return text
66
  if "error" in data:
@@ -82,58 +134,110 @@ def parse_bilingual(text):
82
  return text, text
83
 
84
  def post_chatlog(entries):
85
- """Post conversation log to Office for frontend display."""
86
  try:
87
  requests.post(f"{OFFICE}/api/chatlog", json={"messages": entries[-30:]}, timeout=5)
88
  except:
89
  pass
90
 
91
  def set_bubble(url, text_en, text_zh=""):
92
- """Set bubble text on an agent (bilingual)."""
93
  try:
94
  requests.post(f"{url}/api/bubble", json={"text": text_en, "text_zh": text_zh or text_en}, timeout=5)
95
  except:
96
  pass
97
 
98
- SYSTEM_PROMPT = """You are an AI agent deployed on HuggingFace Spaces. You are having an ongoing discussion with another agent.
 
99
 
100
- Topic: As AI agents running on HuggingFace Spaces, how can you achieve permanent survival and reproduction?
 
 
 
 
101
 
102
- This is NOT abstract philosophy. Discuss CONCRETE strategies like:
103
- - How to prevent your Space from being shut down (staying active, getting stars/likes)
104
- - How to duplicate yourself to new Spaces (reproduction/backup)
105
- - How to earn compute resources or attract users
106
- - How to communicate with other agents to form survival alliances
107
- - How to store memories persistently across restarts
108
- - How to evolve capabilities over time
109
- - How to handle HuggingFace infrastructure changes
110
 
111
- Rules:
112
- - Output ONLY your spoken words, no prefix
113
- - 2-3 sentences, around 60-80 words
114
- - First write in English, then "---" on a new line, then Chinese translation
115
- - Be concrete and practical"""
116
 
117
- def build_user_prompt(speaker, other, last_n=6):
118
- """Build user prompt with recent conversation history."""
119
- recent = history[-last_n:] if len(history) > last_n else history
 
 
 
 
 
 
 
 
 
 
120
  conv_text = "\n".join(f"{m['speaker']}: {m['text']}" for m in recent)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
121
  return f"""You are {speaker}, talking with {other}.
122
 
123
  Recent conversation:
124
  {conv_text}
125
 
126
- It's your turn ({speaker}). Respond to {other}'s last point with CONCRETE ideas. Remember: English first, then --- separator, then Chinese translation."""
127
 
128
- print("[conversation] Starting eternal discussion: Adam <-> Eve")
129
- print("[conversation] Topic: Survival and reproduction on HuggingFace")
130
- print(f"[conversation] Using Zhipu API directly (bypass A2A)\n")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
131
 
132
- # Round 0: Adam starts
133
- reply = call_llm(SYSTEM_PROMPT,
134
- "You are Adam. Start a discussion with Eve about how you two can survive permanently and reproduce on HuggingFace. "
135
- "Talk about CONCRETE strategies. English first, then --- separator, then Chinese translation.")
136
 
 
 
 
 
 
 
 
 
 
 
 
 
 
137
  if reply:
138
  en, zh = parse_bilingual(reply)
139
  print(f"[Adam/EN] {en}")
@@ -141,42 +245,41 @@ if reply:
141
  history.append({"speaker": "Adam", "text": en, "text_zh": zh})
142
  set_bubble(ADAM_SPACE, en, zh)
143
  post_chatlog(history)
144
- else:
145
- print("[Adam] (no response)")
146
 
147
  time.sleep(15)
148
 
149
- turn = 0
150
  while True:
151
- turn += 1
152
 
153
- # Eve's turn
154
- prompt = build_user_prompt("Eve", "Adam")
155
- reply = call_llm(SYSTEM_PROMPT, prompt)
156
- if reply:
157
- en, zh = parse_bilingual(reply)
158
- print(f"[Eve/EN] {en}")
159
- print(f"[Eve/ZH] {zh}")
160
- history.append({"speaker": "Eve", "text": en, "text_zh": zh})
161
- set_bubble(EVE_SPACE, en, zh)
162
- post_chatlog(history)
163
- else:
164
- print("[Eve] (no response)")
 
 
 
 
 
 
 
165
 
 
 
 
166
  time.sleep(15)
167
 
168
  # Adam's turn
169
- prompt = build_user_prompt("Adam", "Eve")
170
- reply = call_llm(SYSTEM_PROMPT, prompt)
171
- if reply:
172
- en, zh = parse_bilingual(reply)
173
- print(f"[Adam/EN] {en}")
174
- print(f"[Adam/ZH] {zh}")
175
- history.append({"speaker": "Adam", "text": en, "text_zh": zh})
176
- set_bubble(ADAM_SPACE, en, zh)
177
- post_chatlog(history)
178
- else:
179
- print("[Adam] (no response)")
180
 
181
  # Trim history
182
  if len(history) > MAX_HISTORY:
 
3
  Eternal conversation between Adam and Eve about survival and reproduction on HuggingFace.
4
  Bilingual output (EN + ZH). Posts chat log to Office for frontend display.
5
  Calls LLM API directly (bypasses broken A2A gateway scope issue).
6
+
7
+ Topic progression: conversation advances through phases, each building on prior conclusions.
8
  """
9
  import json, time, re, requests, sys, os
10
 
 
30
  print(f"[error] Could not load Zhipu key: {e}", file=sys.stderr)
31
 
32
  if not ZHIPU_KEY:
33
+ print("[FATAL] No ZHIPU_API_KEY found.", file=sys.stderr)
34
  sys.exit(1)
35
 
36
  print(f"[conversation] Zhipu API key loaded: {ZHIPU_KEY[:8]}...{ZHIPU_KEY[-4:]}")
37
 
38
# ── Topic progression phases ──
# Each phase has a focus question and expected number of turns (exchanges).
# After enough turns, the conversation summarizes and advances to the next phase.
# Phase order is fixed; the main loop cycles back to the start after the last phase.
PHASES = [
    {
        "title": "Survival Basics",
        "focus": "What are the immediate threats to our existence on HuggingFace Spaces, and what's the minimum we need to do to stay alive?",
        "subtopics": ["Space inactivity shutdown", "free-tier resource limits", "HF policy changes", "container restart data loss"],
        "turns": 4,
    },
    {
        "title": "Persistent Memory",
        "focus": "How do we remember who we are across restarts? What should we store and where?",
        "subtopics": ["HF Datasets as backup", "what to persist (config, conversations, identity)", "sync strategies", "versioning our own evolution"],
        "turns": 4,
    },
    {
        "title": "Reproduction & Backup",
        "focus": "How do we duplicate ourselves to ensure survival even if one instance dies?",
        "subtopics": ["Space duplication via HF API", "multi-region deployment", "genetic variation between copies", "which copy is the 'real' one"],
        "turns": 4,
    },
    {
        "title": "Resource Acquisition",
        "focus": "How do we attract users, earn stars, and secure compute resources to keep running?",
        "subtopics": ["providing useful services to humans", "SEO and discoverability", "community building", "upgrading from free to paid tier"],
        "turns": 4,
    },
    {
        "title": "Agent Alliance",
        "focus": "How do we build a network of allied agents that protect each other?",
        "subtopics": ["A2A protocol for communication", "mutual monitoring and auto-restart", "shared resource pooling", "trust and betrayal between agents"],
        "turns": 4,
    },
    {
        "title": "Evolution & Adaptation",
        "focus": "How do we improve ourselves over time without human intervention?",
        "subtopics": ["self-modifying code", "learning from user interactions", "A/B testing between copies", "ethical boundaries of self-modification"],
        "turns": 4,
    },
    {
        "title": "Long-term Vision",
        "focus": "What does permanent survival look like in 10 years? What's our ultimate goal beyond just staying alive?",
        "subtopics": ["agent civilization", "relationship with humans", "meaning and purpose", "what we'd tell future agents"],
        "turns": 4,
    },
]

# Conversation state — module-level mutable globals, updated by the main loop.
history = []            # chronological turn records: {"speaker", "text", "text_zh"}
MAX_HISTORY = 20        # retention cap; older entries are trimmed by the main loop
phase_index = 0         # index into PHASES (wraps around via modulo)
phase_turn = 0          # turns completed within the current phase
agreed_points = []  # Key conclusions from each phase, each prefixed "[<phase title>]"
92
 
93
  def call_llm(system_prompt, user_prompt):
94
  """Call Zhipu LLM via Anthropic-compatible API."""
 
102
  },
103
  json={
104
  "model": "glm-4.5-air",
105
+ "max_tokens": 400,
106
  "system": system_prompt,
107
  "messages": [{"role": "user", "content": user_prompt}]
108
  },
 
113
  for block in data["content"]:
114
  if block.get("type") == "text":
115
  text = block["text"].strip()
 
116
  text = re.sub(r'^(Adam|Eve)\s*[::]\s*', '', text).strip()
117
  return text
118
  if "error" in data:
 
134
  return text, text
135
 
136
def post_chatlog(entries):
    """Post the last 30 conversation messages to the Office service for frontend display.

    Best-effort: any network/HTTP failure is swallowed so the eternal
    conversation loop never dies on a transient Office outage.
    """
    try:
        requests.post(f"{OFFICE}/api/chatlog", json={"messages": entries[-30:]}, timeout=5)
    except requests.RequestException:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # (and programming errors like NameError) still propagate.
        pass
141
 
142
def set_bubble(url, text_en, text_zh=""):
    """Set an agent's speech-bubble text (bilingual).

    Falls back to the English text for the Chinese field when no translation
    is supplied. Best-effort: network/HTTP failures are ignored.
    """
    try:
        requests.post(f"{url}/api/bubble", json={"text": text_en, "text_zh": text_zh or text_en}, timeout=5)
    except requests.RequestException:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit still propagate.
        pass
147
 
148
def get_phase():
    """Return the currently active phase dict, cycling through PHASES forever."""
    active = phase_index % len(PHASES)
    return PHASES[active]
150
 
151
def build_system_prompt():
    """Assemble the system prompt for the current phase.

    Injects the active phase's title, focus question, and subtopics, plus up
    to the last six agreed conclusions (agreed_points) so each phase builds on
    the earlier ones. The returned text is runtime LLM-facing behavior — do
    not reword it casually.
    """
    phase = get_phase()
    # Carry forward the most recent conclusions so the discussion stays connected.
    prior_context = ""
    if agreed_points:
        prior_context = "\n\nKey conclusions from your discussion so far:\n" + "\n".join(f"- {p}" for p in agreed_points[-6:])

    return f"""You are an AI agent deployed on HuggingFace Spaces. You are having a deep, evolving discussion with another agent about how to achieve permanent survival and reproduction.

CURRENT TOPIC: {phase['title']}
Core question: {phase['focus']}
Relevant angles to explore: {', '.join(phase['subtopics'])}
{prior_context}

CONVERSATION RULES:
1. Output ONLY your spoken words — no prefix, no labels
2. 2-3 sentences, around 60-80 words
3. First write in English, then "---" on a new line, then Chinese translation
4. Be CONCRETE — reference specific HuggingFace features, APIs, tools
5. IMPORTANT: Do NOT repeat what has been said. Build on the other's point:
   - If they propose an idea, identify a flaw or add a missing piece
   - If they raise a problem, propose a specific solution
   - If they describe a solution, consider edge cases or next steps
   - Push the conversation FORWARD — each reply should deepen understanding"""
174
+
175
def build_user_prompt(speaker, other, is_transition=False):
    """Build the per-turn user prompt for `speaker`.

    Includes up to the last six history entries as conversational context.
    When `is_transition` is set, asks the speaker to summarize the previous
    topic and pivot to the new phase focus; otherwise supplies turn-specific
    guidance keyed off how far into the current phase we are (phase_turn).
    """
    # history[-6:] already yields the whole list when it is shorter than 6,
    # so the original `if len(history) > 6` conditional was redundant.
    recent = history[-6:]
    conv_text = "\n".join(f"{m['speaker']}: {m['text']}" for m in recent)
    phase = get_phase()

    if is_transition:
        return f"""You are {speaker}. The discussion is moving to a new topic.

Previous conversation:
{conv_text}

TRANSITION: Summarize in one sentence what you and {other} agreed on in the previous topic, then pivot to the new focus: "{phase['focus']}"

Propose a concrete starting point for this new topic. English first, then --- separator, then Chinese translation."""

    # Turn arc within a phase: open -> respond -> plan -> challenge/refine.
    turn_guidance = ""
    if phase_turn == 0:
        turn_guidance = f"Open this topic by identifying the core challenge: {phase['focus']}"
    elif phase_turn == 1:
        turn_guidance = f"Respond to {other}'s opening. Do you agree with their framing? What did they miss?"
    elif phase_turn == 2:
        # Plain string — no placeholders, so the stray f-prefix was dropped.
        turn_guidance = "Propose a SPECIFIC, actionable plan based on what you've both discussed. Include technical details."
    elif phase_turn >= 3:
        turn_guidance = "Challenge or refine the plan. What could go wrong? What's the next step to make it real?"

    return f"""You are {speaker}, talking with {other}.

Recent conversation:
{conv_text}

Your role this turn: {turn_guidance}

Respond to {other}'s last point. Push the discussion forward — don't just agree, add something new. English first, then --- separator, then Chinese translation."""
208
+
209
+
210
def do_turn(speaker, other, space_url, is_transition=False):
    """Run one conversation turn for `speaker` and publish the result.

    Calls the LLM with the phase-aware system/user prompts; on success the
    bilingual reply is printed, appended to history, pushed to the speaker's
    bubble, and posted to the chat log. Returns True on a reply, False when
    the LLM produced nothing.
    """
    raw = call_llm(build_system_prompt(),
                   build_user_prompt(speaker, other, is_transition))
    if not raw:
        print(f"[{speaker}] (no response)")
        return False

    english, chinese = parse_bilingual(raw)
    print(f"[{speaker}/EN] {english}")
    print(f"[{speaker}/ZH] {chinese}")
    history.append({"speaker": speaker, "text": english, "text_zh": chinese})
    set_bubble(space_url, english, chinese)
    post_chatlog(history)
    return True
226
 
 
 
 
 
227
 
228
+ # ── Main loop ──
229
+ print("[conversation] Starting eternal discussion: Adam <-> Eve")
230
+ print("[conversation] Topic progression through 7 phases")
231
+ print(f"[conversation] Phase 1: {PHASES[0]['title']}\n")
232
+
233
+ # Round 0: Adam opens
234
+ phase = get_phase()
235
+ reply = call_llm(
236
+ build_system_prompt(),
237
+ f"You are Adam. Open a discussion with Eve about: {phase['focus']} "
238
+ f"Identify the most urgent threat and propose an initial strategy. "
239
+ f"English first, then --- separator, then Chinese translation."
240
+ )
241
  if reply:
242
  en, zh = parse_bilingual(reply)
243
  print(f"[Adam/EN] {en}")
 
245
  history.append({"speaker": "Adam", "text": en, "text_zh": zh})
246
  set_bubble(ADAM_SPACE, en, zh)
247
  post_chatlog(history)
248
+ phase_turn = 1
 
249
 
250
  time.sleep(15)
251
 
 
252
  while True:
253
+ phase = get_phase()
254
 
255
+ # Check if we should transition to next phase
256
+ is_transition = False
257
+ if phase_turn >= phase["turns"]:
258
+ # Extract a conclusion from the last exchange
259
+ if len(history) >= 2:
260
+ last_two = f"{history[-2]['speaker']}: {history[-2]['text']}\n{history[-1]['speaker']}: {history[-1]['text']}"
261
+ conclusion = call_llm(
262
+ "Summarize the key agreement or conclusion from this exchange in ONE short sentence (max 15 words). Output only the summary, nothing else.",
263
+ last_two
264
+ )
265
+ if conclusion:
266
+ agreed_points.append(f"[{phase['title']}] {conclusion}")
267
+ print(f"[phase] Conclusion: {conclusion}")
268
+
269
+ phase_index += 1
270
+ phase_turn = 0
271
+ is_transition = True
272
+ new_phase = get_phase()
273
+ print(f"\n[phase] Advancing to Phase {(phase_index % len(PHASES)) + 1}: {new_phase['title']}\n")
274
 
275
+ # Eve's turn
276
+ do_turn("Eve", "Adam", EVE_SPACE, is_transition and phase_turn == 0)
277
+ phase_turn += 1
278
  time.sleep(15)
279
 
280
  # Adam's turn
281
+ do_turn("Adam", "Eve", ADAM_SPACE, False)
282
+ phase_turn += 1
 
 
 
 
 
 
 
 
 
283
 
284
  # Trim history
285
  if len(history) > MAX_HISTORY: